source
stringlengths
3
92
c
stringlengths
26
2.25M
munit.c
/* Copyright (c) 2013-2018 Evan Nemerson <evan@nemerson.com> * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, * modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /*** Configuration ***/ /* This is just where the output from the test goes. It's really just * meant to let you choose stdout or stderr, but if anyone really want * to direct it to a file let me know, it would be fairly easy to * support. */ #if !defined(MUNIT_OUTPUT_FILE) # define MUNIT_OUTPUT_FILE stdout #endif /* This is a bit more useful; it tells µnit how to format the seconds in * timed tests. If your tests run for longer you might want to reduce * it, and if your computer is really fast and your tests are tiny you * can increase it. */ #if !defined(MUNIT_TEST_TIME_FORMAT) # define MUNIT_TEST_TIME_FORMAT "0.8f" #endif /* If you have long test names you might want to consider bumping * this. The result information takes 43 characters. 
*/ #if !defined(MUNIT_TEST_NAME_LEN) # define MUNIT_TEST_NAME_LEN 37 #endif /* If you don't like the timing information, you can disable it by * defining MUNIT_DISABLE_TIMING. */ #if !defined(MUNIT_DISABLE_TIMING) # define MUNIT_ENABLE_TIMING #endif /*** End configuration ***/ #if defined(_POSIX_C_SOURCE) && (_POSIX_C_SOURCE < 200809L) # undef _POSIX_C_SOURCE #endif #if !defined(_POSIX_C_SOURCE) # define _POSIX_C_SOURCE 200809L #endif /* Solaris freaks out if you try to use a POSIX or SUS standard without * the "right" C standard. */ #if defined(_XOPEN_SOURCE) # undef _XOPEN_SOURCE #endif #if defined(__STDC_VERSION__) # if __STDC_VERSION__ >= 201112L # define _XOPEN_SOURCE 700 # elif __STDC_VERSION__ >= 199901L # define _XOPEN_SOURCE 600 # endif #endif /* Because, according to Microsoft, POSIX is deprecated. You've got * to appreciate the chutzpah. */ #if defined(_MSC_VER) && !defined(_CRT_NONSTDC_NO_DEPRECATE) # define _CRT_NONSTDC_NO_DEPRECATE #endif #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) # include <stdbool.h> #elif defined(_WIN32) /* https://msdn.microsoft.com/en-us/library/tf4dy80a.aspx */ #endif #include <limits.h> #include <time.h> #include <errno.h> #include <string.h> #include <stdlib.h> #include <stdio.h> #include <stdarg.h> #include <setjmp.h> #if !defined(MUNIT_NO_NL_LANGINFO) && !defined(_WIN32) #define MUNIT_NL_LANGINFO #include <locale.h> #include <langinfo.h> #include <strings.h> #endif #if !defined(_WIN32) # include <unistd.h> # include <sys/types.h> # include <sys/wait.h> #else # include <windows.h> # include <io.h> # include <fcntl.h> # if !defined(STDERR_FILENO) # define STDERR_FILENO _fileno(stderr) # endif #endif #include "munit.h" #define MUNIT_STRINGIFY(x) #x #define MUNIT_XSTRINGIFY(x) MUNIT_STRINGIFY(x) #if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_CC) || defined(__IBMCPP__) # define MUNIT_THREAD_LOCAL __thread #elif (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201102L)) || 
defined(_Thread_local) # define MUNIT_THREAD_LOCAL _Thread_local #elif defined(_WIN32) # define MUNIT_THREAD_LOCAL __declspec(thread) #endif /* MSVC 12.0 will emit a warning at /W4 for code like 'do { ... } * while (0)', or 'do { ... } while (1)'. I'm pretty sure nobody * at Microsoft compiles with /W4. */ #if defined(_MSC_VER) && (_MSC_VER <= 1800) #pragma warning(disable: 4127) #endif #if defined(_WIN32) || defined(__EMSCRIPTEN__) # define MUNIT_NO_FORK #endif #if defined(__EMSCRIPTEN__) # define MUNIT_NO_BUFFER #endif /*** Logging ***/ static MunitLogLevel munit_log_level_visible = MUNIT_LOG_INFO; static MunitLogLevel munit_log_level_fatal = MUNIT_LOG_ERROR; #if defined(MUNIT_THREAD_LOCAL) static MUNIT_THREAD_LOCAL munit_bool munit_error_jmp_buf_valid = 0; static MUNIT_THREAD_LOCAL jmp_buf munit_error_jmp_buf; #endif /* At certain warning levels, mingw will trigger warnings about * suggesting the format attribute, which we've explicity *not* set * because it will then choke on our attempts to use the MS-specific * I64 modifier for size_t (which we have to use since MSVC doesn't * support the C99 z modifier). 
*/ #if defined(__MINGW32__) || defined(__MINGW64__) # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wsuggest-attribute=format" #endif MUNIT_PRINTF(5,0) static void munit_logf_exv(MunitLogLevel level, FILE* fp, const char* filename, int line, const char* format, va_list ap) { if (level < munit_log_level_visible) return; switch (level) { case MUNIT_LOG_DEBUG: fputs("Debug", fp); break; case MUNIT_LOG_INFO: fputs("Info", fp); break; case MUNIT_LOG_WARNING: fputs("Warning", fp); break; case MUNIT_LOG_ERROR: fputs("Error", fp); break; default: munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Invalid log level (%d)", level); return; } fputs(": ", fp); if (filename != NULL) fprintf(fp, "%s:%d: ", filename, line); vfprintf(fp, format, ap); fputc('\n', fp); } MUNIT_PRINTF(3,4) static void munit_logf_internal(MunitLogLevel level, FILE* fp, const char* format, ...) { va_list ap; va_start(ap, format); munit_logf_exv(level, fp, NULL, 0, format, ap); va_end(ap); } static void munit_log_internal(MunitLogLevel level, FILE* fp, const char* message) { munit_logf_internal(level, fp, "%s", message); } void munit_logf_ex(MunitLogLevel level, const char* filename, int line, const char* format, ...) { va_list ap; va_start(ap, format); munit_logf_exv(level, stderr, filename, line, format, ap); va_end(ap); if (level >= munit_log_level_fatal) { #if defined(MUNIT_THREAD_LOCAL) if (munit_error_jmp_buf_valid) longjmp(munit_error_jmp_buf, 1); #endif abort(); } } MUNIT_PRINTF(6, 7) void munit_logm_ex(MunitLogLevel level, const char* filename, int line, void* mem, size_t size, const char* format, ...) 
{ char * hex; int l; hex = malloc(size*2+1); memset(hex, 0, size*2+1); for (uint32_t i = 0; i < size; i++) { snprintf(hex+2*i, 3, "%.02x", ((unsigned char*)mem)[i]); } char *out; int out_len = size*2+1+2048; out = malloc(out_len); memset(out, 0, out_len); va_list ap; va_start(ap, format); l = vsnprintf(out, out_len, format, ap); l = snprintf(out+l, out_len-l, " %s", hex); va_end(ap); munit_logf_ex(level, filename, line, "%s", out); free(out); free(hex); } void munit_errorf_ex(const char* filename, int line, const char* format, ...) { va_list ap; va_start(ap, format); munit_logf_exv(MUNIT_LOG_ERROR, stderr, filename, line, format, ap); va_end(ap); #if defined(MUNIT_THREAD_LOCAL) if (munit_error_jmp_buf_valid) longjmp(munit_error_jmp_buf, 1); #endif abort(); } #if defined(__MINGW32__) || defined(__MINGW64__) #pragma GCC diagnostic pop #endif #if !defined(MUNIT_STRERROR_LEN) # define MUNIT_STRERROR_LEN 80 #endif static void munit_log_errno(MunitLogLevel level, FILE* fp, const char* msg) { #if defined(MUNIT_NO_STRERROR_R) || (defined(__MINGW32__) && !defined(MINGW_HAS_SECURE_API)) munit_logf_internal(level, fp, "%s: %s (%d)", msg, strerror(errno), errno); #else char munit_error_str[MUNIT_STRERROR_LEN]; munit_error_str[0] = '\0'; #if !defined(_WIN32) strerror_r(errno, munit_error_str, MUNIT_STRERROR_LEN); #else strerror_s(munit_error_str, MUNIT_STRERROR_LEN, errno); #endif munit_logf_internal(level, fp, "%s: %s (%d)", msg, munit_error_str, errno); #endif } /*** Memory allocation ***/ void* munit_malloc_ex(const char* filename, int line, size_t size) { void* ptr; if (size == 0) return NULL; ptr = calloc(1, size); if (MUNIT_UNLIKELY(ptr == NULL)) { munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Failed to allocate %" MUNIT_SIZE_MODIFIER "u bytes.", size); } return ptr; } /*** Timer code ***/ #if defined(MUNIT_ENABLE_TIMING) #define psnip_uint64_t munit_uint64_t #define psnip_uint32_t munit_uint32_t /* Code copied from portable-snippets * 
<https://github.com/nemequ/portable-snippets/>. If you need to * change something, please do it there so we can keep the code in * sync. */ /* Clocks (v1) * Portable Snippets - https://gitub.com/nemequ/portable-snippets * Created by Evan Nemerson <evan@nemerson.com> * * To the extent possible under law, the authors have waived all * copyright and related or neighboring rights to this code. For * details, see the Creative Commons Zero 1.0 Universal license at * https://creativecommons.org/publicdomain/zero/1.0/ */ #if !defined(PSNIP_CLOCK_H) #define PSNIP_CLOCK_H #if !defined(psnip_uint64_t) # include "../exact-int/exact-int.h" #endif #if !defined(PSNIP_CLOCK_STATIC_INLINE) # if defined(__GNUC__) # define PSNIP_CLOCK__COMPILER_ATTRIBUTES __attribute__((__unused__)) # else # define PSNIP_CLOCK__COMPILER_ATTRIBUTES # endif # define PSNIP_CLOCK__FUNCTION PSNIP_CLOCK__COMPILER_ATTRIBUTES static #endif enum PsnipClockType { /* This clock provides the current time, in units since 1970-01-01 * 00:00:00 UTC not including leap seconds. In other words, UNIX * time. Keep in mind that this clock doesn't account for leap * seconds, and can go backwards (think NTP adjustments). */ PSNIP_CLOCK_TYPE_WALL = 1, /* The CPU time is a clock which increases only when the current * process is active (i.e., it doesn't increment while blocking on * I/O). */ PSNIP_CLOCK_TYPE_CPU = 2, /* Monotonic time is always running (unlike CPU time), but it only ever moves forward unless you reboot the system. Things like NTP adjustments have no effect on this clock. 
*/ PSNIP_CLOCK_TYPE_MONOTONIC = 3 }; struct PsnipClockTimespec { psnip_uint64_t seconds; psnip_uint64_t nanoseconds; }; /* Methods we support: */ #define PSNIP_CLOCK_METHOD_CLOCK_GETTIME 1 #define PSNIP_CLOCK_METHOD_TIME 2 #define PSNIP_CLOCK_METHOD_GETTIMEOFDAY 3 #define PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER 4 #define PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME 5 #define PSNIP_CLOCK_METHOD_CLOCK 6 #define PSNIP_CLOCK_METHOD_GETPROCESSTIMES 7 #define PSNIP_CLOCK_METHOD_GETRUSAGE 8 #define PSNIP_CLOCK_METHOD_GETSYSTEMTIMEPRECISEASFILETIME 9 #define PSNIP_CLOCK_METHOD_GETTICKCOUNT64 10 #include <assert.h> #if defined(HEDLEY_UNREACHABLE) # define PSNIP_CLOCK_UNREACHABLE() HEDLEY_UNREACHABLE() #else # define PSNIP_CLOCK_UNREACHABLE() assert(0) #endif /* Choose an implementation */ /* #undef PSNIP_CLOCK_WALL_METHOD */ /* #undef PSNIP_CLOCK_CPU_METHOD */ /* #undef PSNIP_CLOCK_MONOTONIC_METHOD */ /* We want to be able to detect the libc implementation, so we include <limits.h> (<features.h> isn't available everywhere). */ #if defined(__unix__) || defined(__unix) || defined(__linux__) # include <limits.h> # include <unistd.h> #endif #if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0) /* These are known to work without librt. If you know of others * please let us know so we can add them. 
*/ # if \ (defined(__GLIBC__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 17))) || \ (defined(__FreeBSD__)) # define PSNIP_CLOCK_HAVE_CLOCK_GETTIME # elif !defined(PSNIP_CLOCK_NO_LIBRT) # define PSNIP_CLOCK_HAVE_CLOCK_GETTIME # endif #endif #if defined(_WIN32) # if !defined(PSNIP_CLOCK_CPU_METHOD) # define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_GETPROCESSTIMES # endif # if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER # endif #endif #if defined(__MACH__) && !defined(__gnu_hurd__) # if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME # endif #endif #if defined(PSNIP_CLOCK_HAVE_CLOCK_GETTIME) # include <time.h> # if !defined(PSNIP_CLOCK_WALL_METHOD) # if defined(CLOCK_REALTIME_PRECISE) # define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME_PRECISE # elif !defined(__sun) # define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME # endif # endif # if !defined(PSNIP_CLOCK_CPU_METHOD) # if defined(_POSIX_CPUTIME) || defined(CLOCK_PROCESS_CPUTIME_ID) # define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_PROCESS_CPUTIME_ID # elif defined(CLOCK_VIRTUAL) # define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_VIRTUAL # endif # endif # if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) # if defined(CLOCK_MONOTONIC_RAW) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC # elif defined(CLOCK_MONOTONIC_PRECISE) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC_PRECISE # elif defined(_POSIX_MONOTONIC_CLOCK) || defined(CLOCK_MONOTONIC) # 
define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC # endif # endif #endif #if defined(_POSIX_VERSION) && (_POSIX_VERSION >= 200112L) # if !defined(PSNIP_CLOCK_WALL_METHOD) # define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_GETTIMEOFDAY # endif #endif #if !defined(PSNIP_CLOCK_WALL_METHOD) # define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_TIME #endif #if !defined(PSNIP_CLOCK_CPU_METHOD) # define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK #endif /* Primarily here for testing. */ #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) && defined(PSNIP_CLOCK_REQUIRE_MONOTONIC) # error No monotonic clock found. #endif /* Implementations */ #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_TIME)) # include <time.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) # include 
<sys/time.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) # include <windows.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) # include <sys/time.h> # include <sys/resource.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) # include <CoreServices/CoreServices.h> # include <mach/mach.h> # include <mach/mach_time.h> #endif /*** Implementations ***/ #define PSNIP_CLOCK_NSEC_PER_SEC ((psnip_uint32_t) (1000000000ULL)) #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock__clock_getres 
(clockid_t clk_id) { struct timespec res; int r; r = clock_getres(clk_id, &res); if (r != 0) return 0; return (psnip_uint32_t) (PSNIP_CLOCK_NSEC_PER_SEC / res.tv_nsec); } PSNIP_CLOCK__FUNCTION int psnip_clock__clock_gettime (clockid_t clk_id, struct PsnipClockTimespec* res) { struct timespec ts; if (clock_gettime(clk_id, &ts) != 0) return -10; res->seconds = (psnip_uint64_t) (ts.tv_sec); res->nanoseconds = (psnip_uint64_t) (ts.tv_nsec); return 0; } #endif PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_wall_get_precision (void) { #if !defined(PSNIP_CLOCK_WALL_METHOD) return 0; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_WALL); #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY return 1000000; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME return 1; #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_wall_get_time (struct PsnipClockTimespec* res) { (void) res; #if !defined(PSNIP_CLOCK_WALL_METHOD) return -2; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_WALL, res); #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME res->seconds = time(NULL); res->nanoseconds = 0; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY struct timeval tv; if (gettimeofday(&tv, NULL) != 0) return -6; res->seconds = tv.tv_sec; res->nanoseconds = tv.tv_usec * 1000; #else return -2; #endif return 0; } PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_cpu_get_precision (void) { #if !defined(PSNIP_CLOCK_CPU_METHOD) return 0; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return 
psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_CPU); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK return CLOCKS_PER_SEC; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES return PSNIP_CLOCK_NSEC_PER_SEC / 100; #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_cpu_get_time (struct PsnipClockTimespec* res) { #if !defined(PSNIP_CLOCK_CPU_METHOD) (void) res; return -2; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_CPU, res); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK clock_t t = clock(); if (t == ((clock_t) -1)) return -5; res->seconds = t / CLOCKS_PER_SEC; res->nanoseconds = (t % CLOCKS_PER_SEC) * (PSNIP_CLOCK_NSEC_PER_SEC / CLOCKS_PER_SEC); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES FILETIME CreationTime, ExitTime, KernelTime, UserTime; LARGE_INTEGER date, adjust; if (!GetProcessTimes(GetCurrentProcess(), &CreationTime, &ExitTime, &KernelTime, &UserTime)) return -7; /* http://www.frenk.com/2009/12/convert-filetime-to-unix-timestamp/ */ date.HighPart = UserTime.dwHighDateTime; date.LowPart = UserTime.dwLowDateTime; adjust.QuadPart = 11644473600000 * 10000; date.QuadPart -= adjust.QuadPart; res->seconds = date.QuadPart / 10000000; res->nanoseconds = (date.QuadPart % 10000000) * (PSNIP_CLOCK_NSEC_PER_SEC / 100); #elif PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE struct rusage usage; if (getrusage(RUSAGE_SELF, &usage) != 0) return -8; res->seconds = usage.ru_utime.tv_sec; res->nanoseconds = tv.tv_usec * 1000; #else (void) res; return -2; #endif return 0; } PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_monotonic_get_precision (void) { #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) return 0; #elif 
defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC); #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME static mach_timebase_info_data_t tbi = { 0, }; if (tbi.denom == 0) mach_timebase_info(&tbi); return (psnip_uint32_t) (tbi.numer / tbi.denom); #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64 return 1000; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER LARGE_INTEGER Frequency; QueryPerformanceFrequency(&Frequency); return (psnip_uint32_t) ((Frequency.QuadPart > PSNIP_CLOCK_NSEC_PER_SEC) ? PSNIP_CLOCK_NSEC_PER_SEC : Frequency.QuadPart); #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_monotonic_get_time (struct PsnipClockTimespec* res) { #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) (void) res; return -2; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC, res); #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME psnip_uint64_t nsec = mach_absolute_time(); static mach_timebase_info_data_t tbi = { 0, }; if (tbi.denom == 0) mach_timebase_info(&tbi); nsec *= ((psnip_uint64_t) tbi.numer) / ((psnip_uint64_t) tbi.denom); res->seconds = nsec / PSNIP_CLOCK_NSEC_PER_SEC; res->nanoseconds = nsec % PSNIP_CLOCK_NSEC_PER_SEC; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER LARGE_INTEGER t, f; if (QueryPerformanceCounter(&t) == 0) return -12; QueryPerformanceFrequency(&f); res->seconds = t.QuadPart / f.QuadPart; res->nanoseconds = t.QuadPart % f.QuadPart; if (f.QuadPart > 
PSNIP_CLOCK_NSEC_PER_SEC) res->nanoseconds /= f.QuadPart / PSNIP_CLOCK_NSEC_PER_SEC; else res->nanoseconds *= PSNIP_CLOCK_NSEC_PER_SEC / f.QuadPart; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64 const ULONGLONG msec = GetTickCount64(); res->seconds = msec / 1000; res->nanoseconds = sec % 1000; #else return -2; #endif return 0; } /* Returns the number of ticks per second for the specified clock. * For example, a clock with millisecond precision would return 1000, * and a clock with 1 second (such as the time() function) would * return 1. * * If the requested clock isn't available, it will return 0. * Hopefully this will be rare, but if it happens to you please let us * know so we can work on finding a way to support your system. * * Note that different clocks on the same system often have a * different precisions. */ PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_get_precision (enum PsnipClockType clock_type) { switch (clock_type) { case PSNIP_CLOCK_TYPE_MONOTONIC: return psnip_clock_monotonic_get_precision (); case PSNIP_CLOCK_TYPE_CPU: return psnip_clock_cpu_get_precision (); case PSNIP_CLOCK_TYPE_WALL: return psnip_clock_wall_get_precision (); } PSNIP_CLOCK_UNREACHABLE(); return 0; } /* Set the provided timespec to the requested time. Returns 0 on * success, or a negative value on failure. 
*/ PSNIP_CLOCK__FUNCTION int psnip_clock_get_time (enum PsnipClockType clock_type, struct PsnipClockTimespec* res) { assert(res != NULL); switch (clock_type) { case PSNIP_CLOCK_TYPE_MONOTONIC: return psnip_clock_monotonic_get_time (res); case PSNIP_CLOCK_TYPE_CPU: return psnip_clock_cpu_get_time (res); case PSNIP_CLOCK_TYPE_WALL: return psnip_clock_wall_get_time (res); } return -1; } #endif /* !defined(PSNIP_CLOCK_H) */ static psnip_uint64_t munit_clock_get_elapsed(struct PsnipClockTimespec* start, struct PsnipClockTimespec* end) { psnip_uint64_t r = (end->seconds - start->seconds) * PSNIP_CLOCK_NSEC_PER_SEC; if (end->nanoseconds < start->nanoseconds) { r -= (start->nanoseconds - end->nanoseconds); } else { r += (end->nanoseconds - start->nanoseconds); } return r; } #else # include <time.h> #endif /* defined(MUNIT_ENABLE_TIMING) */ /*** PRNG stuff ***/ /* This is (unless I screwed up, which is entirely possible) the * version of PCG with 32-bit state. It was chosen because it has a * small enough state that we should reliably be able to use CAS * instead of requiring a lock for thread-safety. * * If I did screw up, I probably will not bother changing it unless * there is a significant bias. It's really not important this be * particularly strong, as long as it is fairly random it's much more * important that it be reproducible, so bug reports have a better * chance of being reproducible. 
*/ #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) && !defined(__EMSCRIPTEN__) && (!defined(__GNUC_MINOR__) || (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ > 8)) # define HAVE_STDATOMIC #elif defined(__clang__) # if __has_extension(c_atomic) # define HAVE_CLANG_ATOMICS # endif #endif /* Workaround for http://llvm.org/bugs/show_bug.cgi?id=26911 */ #if defined(__clang__) && defined(_WIN32) # undef HAVE_STDATOMIC # if defined(__c2__) # undef HAVE_CLANG_ATOMICS # endif #endif #if defined(_OPENMP) # define ATOMIC_UINT32_T uint32_t # define ATOMIC_UINT32_INIT(x) (x) #elif defined(HAVE_STDATOMIC) # include <stdatomic.h> # define ATOMIC_UINT32_T _Atomic uint32_t # define ATOMIC_UINT32_INIT(x) ATOMIC_VAR_INIT(x) #elif defined(HAVE_CLANG_ATOMICS) # define ATOMIC_UINT32_T _Atomic uint32_t # define ATOMIC_UINT32_INIT(x) (x) #elif defined(_WIN32) # define ATOMIC_UINT32_T volatile LONG # define ATOMIC_UINT32_INIT(x) (x) #else # define ATOMIC_UINT32_T volatile uint32_t # define ATOMIC_UINT32_INIT(x) (x) #endif static ATOMIC_UINT32_T munit_rand_state = ATOMIC_UINT32_INIT(42); #if defined(_OPENMP) static inline void munit_atomic_store(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T value) { #pragma omp critical (munit_atomics) *dest = value; } static inline uint32_t munit_atomic_load(ATOMIC_UINT32_T* src) { int ret; #pragma omp critical (munit_atomics) ret = *src; return ret; } static inline uint32_t munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) { munit_bool ret; #pragma omp critical (munit_atomics) { if (*dest == *expected) { *dest = desired; ret = 1; } else { ret = 0; } } return ret; } #elif defined(HAVE_STDATOMIC) # define munit_atomic_store(dest, value) atomic_store(dest, value) # define munit_atomic_load(src) atomic_load(src) # define munit_atomic_cas(dest, expected, value) atomic_compare_exchange_weak(dest, expected, value) #elif defined(HAVE_CLANG_ATOMICS) # define 
munit_atomic_store(dest, value) __c11_atomic_store(dest, value, __ATOMIC_SEQ_CST) # define munit_atomic_load(src) __c11_atomic_load(src, __ATOMIC_SEQ_CST) # define munit_atomic_cas(dest, expected, value) __c11_atomic_compare_exchange_weak(dest, expected, value, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) #elif defined(__GNUC__) && (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7) # define munit_atomic_store(dest, value) __atomic_store_n(dest, value, __ATOMIC_SEQ_CST) # define munit_atomic_load(src) __atomic_load_n(src, __ATOMIC_SEQ_CST) # define munit_atomic_cas(dest, expected, value) __atomic_compare_exchange_n(dest, expected, value, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) #elif defined(__GNUC__) && (__GNUC__ >= 4) # define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0) # define munit_atomic_load(src) (*(src)) # define munit_atomic_cas(dest, expected, value) __sync_bool_compare_and_swap(dest, *expected, value) #elif defined(_WIN32) /* Untested */ # define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0) # define munit_atomic_load(src) (*(src)) # define munit_atomic_cas(dest, expected, value) InterlockedCompareExchange((dest), (value), *(expected)) #else # warning No atomic implementation, PRNG will not be thread-safe # define munit_atomic_store(dest, value) do { *(dest) = (value); } while (0) # define munit_atomic_load(src) (*(src)) static inline munit_bool munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) { if (*dest == *expected) { *dest = desired; return 1; } else { return 0; } } #endif #define MUNIT_PRNG_MULTIPLIER (747796405U) #define MUNIT_PRNG_INCREMENT (1729U) static munit_uint32_t munit_rand_next_state(munit_uint32_t state) { return state * MUNIT_PRNG_MULTIPLIER + MUNIT_PRNG_INCREMENT; } static munit_uint32_t munit_rand_from_state(munit_uint32_t state) { munit_uint32_t res = ((state >> ((state >> 28) + 4)) ^ state) * (277803737U); res ^= res >> 22; return res; } void 
munit_rand_seed(munit_uint32_t seed) { munit_uint32_t state = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT); munit_atomic_store(&munit_rand_state, state); } static munit_uint32_t munit_rand_generate_seed(void) { munit_uint32_t seed, state; #if defined(MUNIT_ENABLE_TIMING) struct PsnipClockTimespec wc = { 0, }; psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wc); seed = (munit_uint32_t) wc.nanoseconds; #else seed = (munit_uint32_t) time(NULL); #endif state = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT); return munit_rand_from_state(state); } static munit_uint32_t munit_rand_state_uint32(munit_uint32_t* state) { const munit_uint32_t old = *state; *state = munit_rand_next_state(old); return munit_rand_from_state(old); } munit_uint32_t munit_rand_uint32(void) { munit_uint32_t old, state; do { old = munit_atomic_load(&munit_rand_state); state = munit_rand_next_state(old); } while (!munit_atomic_cas(&munit_rand_state, &old, state)); return munit_rand_from_state(old); } static void munit_rand_state_memory(munit_uint32_t* state, size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) { size_t members_remaining = size / sizeof(munit_uint32_t); size_t bytes_remaining = size % sizeof(munit_uint32_t); munit_uint8_t* b = data; munit_uint32_t rv; while (members_remaining-- > 0) { rv = munit_rand_state_uint32(state); memcpy(b, &rv, sizeof(munit_uint32_t)); b += sizeof(munit_uint32_t); } if (bytes_remaining != 0) { rv = munit_rand_state_uint32(state); memcpy(b, &rv, bytes_remaining); } } void munit_rand_memory(size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) { munit_uint32_t old, state; do { state = old = munit_atomic_load(&munit_rand_state); munit_rand_state_memory(&state, size, data); } while (!munit_atomic_cas(&munit_rand_state, &old, state)); } static munit_uint32_t munit_rand_state_at_most(munit_uint32_t* state, munit_uint32_t salt, munit_uint32_t max) { /* We want (UINT32_MAX + 1) % max, which in unsigned arithmetic is the same * as (UINT32_MAX + 1 - max) 
% max = -max % max. We compute -max using not * to avoid compiler warnings. */ const munit_uint32_t min = (~max + 1U) % max; munit_uint32_t x; if (max == (~((munit_uint32_t) 0U))) return munit_rand_state_uint32(state) ^ salt; max++; do { x = munit_rand_state_uint32(state) ^ salt; } while (x < min); return x % max; } static munit_uint32_t munit_rand_at_most(munit_uint32_t salt, munit_uint32_t max) { munit_uint32_t old, state; munit_uint32_t retval; do { state = old = munit_atomic_load(&munit_rand_state); retval = munit_rand_state_at_most(&state, salt, max); } while (!munit_atomic_cas(&munit_rand_state, &old, state)); return retval; } int munit_rand_int_range(int min, int max) { munit_uint64_t range = (munit_uint64_t) max - (munit_uint64_t) min; if (min > max) return munit_rand_int_range(max, min); if (range > (~((munit_uint32_t) 0U))) range = (~((munit_uint32_t) 0U)); return min + munit_rand_at_most(0, (munit_uint32_t) range); } double munit_rand_double(void) { munit_uint32_t old, state; double retval = 0.0; do { state = old = munit_atomic_load(&munit_rand_state); /* See http://mumble.net/~campbell/tmp/random_real.c for how to do * this right. Patches welcome if you feel that this is too * biased. 
*/ retval = munit_rand_state_uint32(&state) / ((~((munit_uint32_t) 0U)) + 1.0); } while (!munit_atomic_cas(&munit_rand_state, &old, state)); return retval; } /*** Test suite handling ***/ typedef struct { unsigned int successful; unsigned int skipped; unsigned int failed; unsigned int errored; #if defined(MUNIT_ENABLE_TIMING) munit_uint64_t cpu_clock; munit_uint64_t wall_clock; #endif } MunitReport; typedef struct { const char* prefix; const MunitSuite* suite; const char** tests; munit_uint32_t seed; unsigned int iterations; MunitParameter* parameters; munit_bool single_parameter_mode; void* user_data; MunitReport report; munit_bool colorize; munit_bool fork; munit_bool show_stderr; munit_bool fatal_failures; } MunitTestRunner; const MunitParameter* munit_parameters_get(const MunitParameter params[], const char* key) { const MunitParameter* param; for (param = params ; param != NULL && param->name != NULL ; param++) if (strcmp(param->name, key) == 0) return param; return NULL; } #if defined(MUNIT_ENABLE_TIMING) static void munit_print_time(FILE* fp, munit_uint64_t nanoseconds) { fprintf(fp, "%" MUNIT_TEST_TIME_FORMAT, ((double) nanoseconds) / ((double) PSNIP_CLOCK_NSEC_PER_SEC)); } #endif /* Add a paramter to an array of parameters. */ static MunitResult munit_parameters_add(size_t* params_size, MunitParameter* params[MUNIT_ARRAY_PARAM(*params_size)], char* name, const MunitParameterValue* value) { *params = realloc(*params, sizeof(MunitParameter) * (*params_size + 2)); if (*params == NULL) return MUNIT_ERROR; (*params)[*params_size].name = name; if (value != NULL) { (*params)[*params_size].value = *value; } else { (*params)[*params_size].value = (MunitParameterValue){.type = 0, .ptr = NULL}; } (*params_size)++; (*params)[*params_size].name = NULL; (*params)[*params_size].value.ptr = NULL; return MUNIT_OK; } /* Concatenate two strings, but just return one of the components * unaltered if the other is NULL or "". 
 */
static char*
munit_maybe_concat(size_t* len, char* prefix, char* suffix) {
  char* res;
  size_t res_l;
  const size_t prefix_l = prefix != NULL ? strlen(prefix) : 0;
  const size_t suffix_l = suffix != NULL ? strlen(suffix) : 0;
  if (prefix_l == 0 && suffix_l == 0) {
    res = NULL;
    res_l = 0;
  } else if (prefix_l == 0 && suffix_l != 0) {
    /* Only the suffix is non-empty: hand it back without copying. */
    res = suffix;
    res_l = suffix_l;
  } else if (prefix_l != 0 && suffix_l == 0) {
    res = prefix;
    res_l = prefix_l;
  } else {
    res_l = prefix_l + suffix_l;
    /* NOTE(review): malloc result is unchecked; a NULL return would crash
     * in the memcpy below.  Presumably OOM is treated as fatal for a test
     * harness — confirm before hardening. */
    res = malloc(res_l + 1);
    memcpy(res, prefix, prefix_l);
    memcpy(res + prefix_l, suffix, suffix_l);
    res[res_l] = 0;
  }
  if (len != NULL)
    *len = res_l;
  return res;
}

/* Possibly free a string returned by munit_maybe_concat: frees `s` only
 * when it is a fresh allocation, i.e. not one of the two inputs returned
 * unaltered. */
static void
munit_maybe_free_concat(char* s, const char* prefix, const char* suffix) {
  if (prefix != s && suffix != s)
    free(s);
}

/* Cheap string hash function (djb2: h = h*33 + c), just used to salt the
 * PRNG so different tests pick different parameter permutations. */
static munit_uint32_t
munit_str_hash(const char* name) {
  const char *p;
  munit_uint32_t h = 5381U;

  for (p = name; *p != '\0'; p++)
    h = (h << 5) + h + *p;

  return h;
}

/* Copy everything readable from fd `from` to fd `to` in 1 KiB chunks;
 * stops on EOF, read error, or write error.  Used to replay the captured
 * stderr buffer after a test finishes. */
static void
munit_splice(int from, int to) {
  munit_uint8_t buf[1024];
#if !defined(_WIN32)
  ssize_t len;
  ssize_t bytes_written;
  ssize_t write_res;
#else
  int len;
  int bytes_written;
  int write_res;
#endif
  do {
    len = read(from, buf, sizeof(buf));
    if (len > 0) {
      bytes_written = 0;
      do {
        /* Short writes are possible; keep writing until the chunk is out. */
        write_res = write(to, buf + bytes_written, len - bytes_written);
        if (write_res < 0)
          break;
        bytes_written += write_res;
      } while (bytes_written < len);
    } else
      break;
  } while (1);
}

/* This is the part that should be handled in the child process.
 * Runs setup -> test -> tear_down up to `iterations` times, accumulating
 * counts and (optionally) wall/CPU time into `report`; stops at the first
 * non-OK result.  Returns the last result observed. */
static MunitResult
munit_test_runner_exec(MunitTestRunner* runner, const MunitTest* test, const MunitParameter params[], MunitReport* report) {
  unsigned int iterations = runner->iterations;
  MunitResult result = MUNIT_FAIL;
#if defined(MUNIT_ENABLE_TIMING)
  struct PsnipClockTimespec wall_clock_begin = { 0, }, wall_clock_end = { 0, };
  struct PsnipClockTimespec cpu_clock_begin = { 0, }, cpu_clock_end = { 0, };
#endif
  unsigned int i = 0;

  /* Single-iteration tests override both the CLI and suite settings. */
  if ((test->options & MUNIT_TEST_OPTION_SINGLE_ITERATION) == MUNIT_TEST_OPTION_SINGLE_ITERATION)
    iterations = 1;
  else if (iterations == 0)
    iterations = runner->suite->iterations;

  /* Re-seed so each test starts from the same, reproducible PRNG state. */
  munit_rand_seed(runner->seed);

  do {
    void* data = (test->setup == NULL) ? runner->user_data : test->setup(params, runner->user_data);

#if defined(MUNIT_ENABLE_TIMING)
    psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_begin);
    psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_begin);
#endif

    result = test->test(params, data);

#if defined(MUNIT_ENABLE_TIMING)
    psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_end);
    psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_end);
#endif

    if (test->tear_down != NULL)
      test->tear_down(data);

    if (MUNIT_LIKELY(result == MUNIT_OK)) {
      report->successful++;
#if defined(MUNIT_ENABLE_TIMING)
      /* Only successful iterations contribute to the timing totals. */
      report->wall_clock += munit_clock_get_elapsed(&wall_clock_begin, &wall_clock_end);
      report->cpu_clock += munit_clock_get_elapsed(&cpu_clock_begin, &cpu_clock_end);
#endif
    } else {
      switch ((int) result) {
        case MUNIT_SKIP:
          report->skipped++;
          break;
        case MUNIT_FAIL:
          report->failed++;
          break;
        case MUNIT_ERROR:
          report->errored++;
          break;
        default:
          break;
      }
      /* Abort the remaining iterations on any non-OK result. */
      break;
    }
  } while (++i < iterations);

  return result;
}

#if defined(MUNIT_EMOTICON)
# define MUNIT_RESULT_STRING_OK ":)"
# define MUNIT_RESULT_STRING_SKIP ":|"
# define MUNIT_RESULT_STRING_FAIL ":("
# define MUNIT_RESULT_STRING_ERROR ":o"
# define MUNIT_RESULT_STRING_TODO ":/"
#else
# define MUNIT_RESULT_STRING_OK "OK "
# define MUNIT_RESULT_STRING_SKIP "SKIP "
# define MUNIT_RESULT_STRING_FAIL "FAIL "
# define MUNIT_RESULT_STRING_ERROR "ERROR"
# define MUNIT_RESULT_STRING_TODO "TODO "
#endif

/* Print `string`, wrapped in an ANSI SGR foreground-color escape
 * ("\x1b[3<color>m" ... "\x1b[39m" reset) when colorization is enabled. */
static void
munit_test_runner_print_color(const MunitTestRunner* runner, const char* string, char color) {
  if (runner->colorize)
    fprintf(MUNIT_OUTPUT_FILE, "\x1b[3%cm%s\x1b[39m", color, string);
  else
    fputs(string, MUNIT_OUTPUT_FILE);
}

#if !defined(MUNIT_NO_BUFFER)
/* Redirect STDERR_FILENO into the tmpfile `stderr_buf`; returns the dup'd
 * original stderr fd (or -1 if there is no buffer) so that
 * munit_restore_stderr can undo the redirection. */
static int
munit_replace_stderr(FILE*
stderr_buf) {
  if (stderr_buf != NULL) {
    const int orig_stderr = dup(STDERR_FILENO);

    int errfd = fileno(stderr_buf);
    if (MUNIT_UNLIKELY(errfd == -1)) {
      exit(EXIT_FAILURE);
    }

    dup2(errfd, STDERR_FILENO);

    return orig_stderr;
  }

  return -1;
}

/* Undo munit_replace_stderr: restore the saved fd onto STDERR_FILENO. */
static void
munit_restore_stderr(int orig_stderr) {
  if (orig_stderr != -1) {
    dup2(orig_stderr, STDERR_FILENO);
    close(orig_stderr);
  }
}
#endif /* !defined(MUNIT_NO_BUFFER) */

/* Run a test with the specified parameters.
 *
 * stderr is captured into a tmpfile and only replayed when the test fails
 * (or --show-stderr is set).  When forking, the child runs the test and
 * ships its MunitReport back through a pipe; the parent interprets the
 * exit status / signal.  Updates runner->report with the outcome. */
static void
munit_test_runner_run_test_with_params(MunitTestRunner* runner, const MunitTest* test, const MunitParameter params[]) {
  MunitResult result = MUNIT_OK;
  MunitReport report = {
    0, 0, 0, 0,
#if defined(MUNIT_ENABLE_TIMING)
    0, 0
#endif
  };
  unsigned int output_l;
  munit_bool first;
  const MunitParameter* param;
  FILE* stderr_buf;
#if !defined(MUNIT_NO_FORK)
  int pipefd[2];
  pid_t fork_pid;
  int orig_stderr;
  ssize_t bytes_written = 0;
  ssize_t write_res;
  ssize_t bytes_read = 0;
  ssize_t read_res;
  int status = 0;
  pid_t changed_pid;
#endif

  if (params != NULL) {
    /* Print "name=value, ..." padded out to the test-name column width. */
    output_l = 2;
    fputs(" ", MUNIT_OUTPUT_FILE);
    first = 1;
    for (param = params ; param != NULL && param->name != NULL ; param++) {
      if (!first) {
        fputs(", ", MUNIT_OUTPUT_FILE);
        output_l += 2;
      } else {
        first = 0;
      }
      output_l += fprintf(MUNIT_OUTPUT_FILE, "%s=%s", param->name, param->value.name);
    }
    while (output_l++ < MUNIT_TEST_NAME_LEN) {
      fputc(' ', MUNIT_OUTPUT_FILE);
    }
  }

  fflush(MUNIT_OUTPUT_FILE);

  stderr_buf = NULL;
#if !defined(_WIN32) || defined(__MINGW32__)
  stderr_buf = tmpfile();
#else
  tmpfile_s(&stderr_buf);
#endif
  if (stderr_buf == NULL) {
    munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create buffer for stderr");
    result = MUNIT_ERROR;
    goto print_result;
  }

#if !defined(MUNIT_NO_FORK)
  if (runner->fork) {
    pipefd[0] = -1;
    pipefd[1] = -1;
    if (pipe(pipefd) != 0) {
      munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create pipe");
      result = MUNIT_ERROR;
      goto print_result;
    }

    fork_pid = fork();
    if (fork_pid == 0) {
      /* Child: run the test, then stream the raw MunitReport struct back
       * to the parent over the pipe. */
      close(pipefd[0]);

      orig_stderr = munit_replace_stderr(stderr_buf);
      munit_test_runner_exec(runner, test, params, &report);

      /* Note that we don't restore stderr.  This is so we can buffer
       * things written to stderr later on (such as by
       * asan/tsan/ubsan, valgrind, etc.) */
      close(orig_stderr);

      do {
        write_res = write(pipefd[1], ((munit_uint8_t*) (&report)) + bytes_written, sizeof(report) - bytes_written);
        if (write_res < 0) {
          if (stderr_buf != NULL) {
            munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to write to pipe");
          }
          exit(EXIT_FAILURE);
        }
        bytes_written += write_res;
      } while ((size_t) bytes_written < sizeof(report));

      if (stderr_buf != NULL)
        fclose(stderr_buf);
      close(pipefd[1]);

      exit(EXIT_SUCCESS);
    } else if (fork_pid == -1) {
      close(pipefd[0]);
      close(pipefd[1]);
      if (stderr_buf != NULL) {
        munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to fork");
      }
      report.errored++;
      result = MUNIT_ERROR;
    } else {
      /* Parent: read the child's report, then reap it and decode the
       * exit status. */
      close(pipefd[1]);
      do {
        read_res = read(pipefd[0], ((munit_uint8_t*) (&report)) + bytes_read, sizeof(report) - bytes_read);
        if (read_res < 1)
          break;
        bytes_read += read_res;
      } while (bytes_read < (ssize_t) sizeof(report));

      changed_pid = waitpid(fork_pid, &status, 0);

      if (MUNIT_LIKELY(changed_pid == fork_pid) && MUNIT_LIKELY(WIFEXITED(status))) {
        if (bytes_read != sizeof(report)) {
          /* Child died before it could send a complete report. */
          munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited unexpectedly with status %d", WEXITSTATUS(status));
          report.errored++;
        } else if (WEXITSTATUS(status) != EXIT_SUCCESS) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited with status %d", WEXITSTATUS(status));
          report.errored++;
        }
      } else {
        if (WIFSIGNALED(status)) {
#if defined(_XOPEN_VERSION) && (_XOPEN_VERSION >= 700)
          munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d (%s)", WTERMSIG(status), strsignal(WTERMSIG(status)));
#else
          munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d", WTERMSIG(status));
#endif
        } else if (WIFSTOPPED(status)) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child stopped by signal %d", WSTOPSIG(status));
        }
        report.errored++;
      }

      close(pipefd[0]);
      /* NOTE(review): the child was already reaped by the waitpid above,
       * so this second waitpid should fail with ECHILD — harmless, but
       * presumably redundant; confirm before removing. */
      waitpid(fork_pid, NULL, 0);
    }
  } else
#endif
  {
    /* Non-forking path: run in-process, using setjmp (when thread-local
     * storage is available) so munit assertion failures can unwind back
     * here instead of aborting. */
#if !defined(MUNIT_NO_BUFFER)
    const volatile int orig_stderr = munit_replace_stderr(stderr_buf);
#endif

#if defined(MUNIT_THREAD_LOCAL)
    if (MUNIT_UNLIKELY(setjmp(munit_error_jmp_buf) != 0)) {
      result = MUNIT_FAIL;
      report.failed++;
    } else {
      munit_error_jmp_buf_valid = 1;
      result = munit_test_runner_exec(runner, test, params, &report);
    }
#else
    result = munit_test_runner_exec(runner, test, params, &report);
#endif

#if !defined(MUNIT_NO_BUFFER)
    munit_restore_stderr(orig_stderr);
#endif

    /* Here just so that the label is used on Windows and we don't get
     * a warning */
    goto print_result;
  }

 print_result:

  fputs("[ ", MUNIT_OUTPUT_FILE);
  if ((test->options & MUNIT_TEST_OPTION_TODO) == MUNIT_TEST_OPTION_TODO) {
    /* TODO tests are expected to fail; succeeding is the error. */
    if (report.failed != 0 || report.errored != 0 || report.skipped != 0) {
      munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_TODO, '3');
      result = MUNIT_OK;
    } else {
      munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1');
      if (MUNIT_LIKELY(stderr_buf != NULL))
        munit_log_internal(MUNIT_LOG_ERROR, stderr_buf, "Test marked TODO, but was successful.");
      runner->report.failed++;
      result = MUNIT_ERROR;
    }
  } else if (report.failed > 0) {
    munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_FAIL, '1');
    runner->report.failed++;
    result = MUNIT_FAIL;
  } else if (report.errored > 0) {
    munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1');
    runner->report.errored++;
    result = MUNIT_ERROR;
  } else if (report.skipped > 0) {
    munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_SKIP, '3');
    runner->report.skipped++;
    result = MUNIT_SKIP;
  } else if (report.successful > 1) {
    /* Multiple iterations: print the per-iteration average, then totals. */
    munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2');
#if defined(MUNIT_ENABLE_TIMING)
    fputs(" ] [ ", MUNIT_OUTPUT_FILE);
    munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock / report.successful);
    fputs(" / ", MUNIT_OUTPUT_FILE);
    munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock / report.successful);
    fprintf(MUNIT_OUTPUT_FILE, " CPU ]\n %-" MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s Total: [ ", "");
    munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock);
    fputs(" / ", MUNIT_OUTPUT_FILE);
    munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock);
    fputs(" CPU", MUNIT_OUTPUT_FILE);
#endif
    runner->report.successful++;
    result = MUNIT_OK;
  } else if (report.successful > 0) {
    munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2');
#if defined(MUNIT_ENABLE_TIMING)
    fputs(" ] [ ", MUNIT_OUTPUT_FILE);
    munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock);
    fputs(" / ", MUNIT_OUTPUT_FILE);
    munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock);
    fputs(" CPU", MUNIT_OUTPUT_FILE);
#endif
    runner->report.successful++;
    result = MUNIT_OK;
  }
  fputs(" ]\n", MUNIT_OUTPUT_FILE);

  if (stderr_buf != NULL) {
    /* Replay captured stderr only for failures/errors or on request. */
    if (result == MUNIT_FAIL || result == MUNIT_ERROR || runner->show_stderr) {
      fflush(MUNIT_OUTPUT_FILE);
      rewind(stderr_buf);
      munit_splice(fileno(stderr_buf), STDERR_FILENO);
      fflush(stderr);
    }
    fclose(stderr_buf);
  }
}

/* Recursively expand the remaining wildcard parameters (those without a
 * CLI-supplied value), running the test once per combination.  `p` points
 * at the next placeholder entry inside `params` to fill in. */
static void
munit_test_runner_run_test_wild(MunitTestRunner* runner,
                                const MunitTest* test,
                                const char* test_name,
                                MunitParameter* params,
                                MunitParameter* p) {
  const MunitParameterEnum* pe;
  MunitParameterValue* values;
  MunitParameter* next;

  /* NOTE(review): pointer-identity comparison, not strcmp — placeholder
   * names are copied from test->parameters entries (see
   * munit_test_runner_run_test), so identity presumably suffices;
   * confirm if placeholders ever come from another source. */
  for (pe = test->parameters ; pe != NULL && pe->name != NULL ; pe++) {
    if (p->name == pe->name)
      break;
  }

  if (pe == NULL)
    return;

  for (values = pe->values ; values->name != NULL ; values++) {
    next = p + 1;
    p->value = *values;
    if (next->name == NULL) {
      /* Last wildcard filled in: actually run the test. */
      munit_test_runner_run_test_with_params(runner, test, params);
    } else {
      munit_test_runner_run_test_wild(runner, test, test_name, params, next);
    }
    if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0))
      break;
  }
}

/* Run a single test, with every combination of parameters
 * requested.
*/ static void munit_test_runner_run_test(MunitTestRunner* runner, const MunitTest* test, const char* prefix) { char* test_name = munit_maybe_concat(NULL, (char*) prefix, (char*) test->name); /* The array of parameters to pass to * munit_test_runner_run_test_with_params */ MunitParameter* params = NULL; size_t params_l = 0; /* Wildcard parameters are parameters which have possible values * specified in the test, but no specific value was passed to the * CLI. That means we want to run the test once for every * possible combination of parameter values or, if --single was * passed to the CLI, a single time with a random set of * parameters. */ MunitParameter* wild_params = NULL; size_t wild_params_l = 0; const MunitParameterEnum* pe; const MunitParameter* cli_p; munit_bool filled; unsigned int possible; MunitParameterValue* vals; size_t first_wild; const MunitParameter* wp; int pidx; munit_rand_seed(runner->seed); fprintf(MUNIT_OUTPUT_FILE, "%-" MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s", test_name); if (test->parameters == NULL) { /* No parameters. Simple, nice. */ munit_test_runner_run_test_with_params(runner, test, NULL); } else { fputc('\n', MUNIT_OUTPUT_FILE); for (pe = test->parameters ; pe != NULL && pe->name != NULL ; pe++) { /* Did we received a value for this parameter from the CLI? */ filled = 0; for (cli_p = runner->parameters ; cli_p != NULL && cli_p->name != NULL ; cli_p++) { if (strcmp(cli_p->name, pe->name) == 0) { /* retrieve the full value*/ for (MunitParameterValue* value = pe->values; value != NULL; value++) { if (strcmp(value->name, cli_p->value.name) == 0) { if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, value) != MUNIT_OK)) goto cleanup; filled = 1; break; } } if (filled) { break; } } } if (filled) continue; /* Nothing from CLI, is the enum NULL/empty? 
We're not a * fuzzer… */ if (pe->values == NULL || pe->values[0].name == NULL) continue; /* If --single was passed to the CLI, choose a value from the * list of possibilities randomly. */ if (runner->single_parameter_mode) { possible = 0; for (vals = pe->values ; vals != NULL ; vals++) possible++; /* We want the tests to be reproducible, even if you're only * running a single test, but we don't want every test with * the same number of parameters to choose the same parameter * number, so use the test name as a primitive salt. */ pidx = munit_rand_at_most(munit_str_hash(test_name), possible - 1); if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, &pe->values[pidx]) != MUNIT_OK)) goto cleanup; } else { /* We want to try every permutation. Put in a placeholder * entry, we'll iterate through them later. */ if (MUNIT_UNLIKELY(munit_parameters_add(&wild_params_l, &wild_params, pe->name, NULL) != MUNIT_OK)) goto cleanup; } } if (wild_params_l != 0) { first_wild = params_l; for (wp = wild_params ; wp != NULL && wp->name != NULL ; wp++) { for (pe = test->parameters ; pe != NULL && pe->name != NULL && pe->values != NULL ; pe++) { if (strcmp(wp->name, pe->name) == 0) { if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, &pe->values[0]) != MUNIT_OK)) goto cleanup; } } } munit_test_runner_run_test_wild(runner, test, test_name, params, params + first_wild); } else { munit_test_runner_run_test_with_params(runner, test, params); } cleanup: free(params); free(wild_params); } munit_maybe_free_concat(test_name, prefix, test->name); } /* Recurse through the suite and run all the tests. If a list of * tests to run was provied on the command line, run only those * tests. 
*/ static void munit_test_runner_run_suite(MunitTestRunner* runner, const MunitSuite* suite, const char* prefix) { size_t pre_l; char* pre = munit_maybe_concat(&pre_l, (char*) prefix, (char*) suite->prefix); const MunitTest* test; const char** test_name; const MunitSuite* child_suite; /* Run the tests. */ for ( const MunitTest * const * tests_suite = suite->tests_suites; *tests_suite != NULL; tests_suite++) { for (test = *tests_suite ; test != NULL && test->test != NULL ; test++) { if (runner->tests != NULL) { /* Specific tests were requested on the CLI */ for (test_name = runner->tests ; test_name != NULL && *test_name != NULL ; test_name++) { if ((pre_l == 0 || strncmp(pre, *test_name, pre_l) == 0) && strncmp(test->name, *test_name + pre_l, strlen(*test_name + pre_l)) == 0) { munit_test_runner_run_test(runner, test, pre); if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0)) goto cleanup; } } } else { /* Run all tests */ munit_test_runner_run_test(runner, test, pre); } } } if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0)) goto cleanup; cleanup: munit_maybe_free_concat(pre, prefix, suite->prefix); } static void munit_test_runner_run(MunitTestRunner* runner) { munit_test_runner_run_suite(runner, runner->suite, NULL); } static void munit_print_help(int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)], void* user_data, const MunitArgument arguments[]) { const MunitArgument* arg; (void) argc; printf("USAGE: %s [OPTIONS...] [TEST...]\n\n", argv[0]); puts(" --seed SEED\n" " Value used to seed the PRNG. Must be a 32-bit integer in decimal\n" " notation with no separators (commas, decimals, spaces, etc.), or\n" " hexidecimal prefixed by \"0x\".\n" " --iterations N\n" " Run each test N times. 0 means the default number.\n" " --param name value\n" " A parameter key/value pair which will be passed to any test with\n" " takes a parameter of that name. 
If not provided, the test will be\n" " run once for each possible parameter value.\n" " --list Write a list of all available tests.\n" " --list-params\n" " Write a list of all available tests and their possible parameters.\n" " --single Run each parameterized test in a single configuration instead of\n" " every possible combination\n" " --log-visible debug|info|warning|error\n" " --log-fatal debug|info|warning|error\n" " Set the level at which messages of different severities are visible,\n" " or cause the test to terminate.\n" #if !defined(MUNIT_NO_FORK) " --no-fork Do not execute tests in a child process. If this option is supplied\n" " and a test crashes (including by failing an assertion), no further\n" " tests will be performed.\n" #endif " --fatal-failures\n" " Stop executing tests as soon as a failure is found.\n" " --show-stderr\n" " Show data written to stderr by the tests, even if the test succeeds.\n" " --color auto|always|never\n" " Colorize (or don't) the output.\n" /* 12345678901234567890123456789012345678901234567890123456789012345678901234567890 */ " --help Print this help message and exit.\n"); #if defined(MUNIT_NL_LANGINFO) setlocale(LC_ALL, ""); fputs((strcasecmp("UTF-8", nl_langinfo(CODESET)) == 0) ? 
"µnit" : "munit", stdout); #else puts("munit"); #endif printf(" %d.%d.%d\n" "Full documentation at: https://nemequ.github.io/munit/\n", (MUNIT_CURRENT_VERSION >> 16) & 0xff, (MUNIT_CURRENT_VERSION >> 8) & 0xff, (MUNIT_CURRENT_VERSION >> 0) & 0xff); for (arg = arguments ; arg != NULL && arg->name != NULL ; arg++) arg->write_help(arg, user_data); } static const MunitArgument* munit_arguments_find(const MunitArgument arguments[], const char* name) { const MunitArgument* arg; for (arg = arguments ; arg != NULL && arg->name != NULL ; arg++) if (strcmp(arg->name, name) == 0) return arg; return NULL; } static void munit_suite_list_tests(const MunitSuite* suite, munit_bool show_params, const char* prefix) { size_t pre_l; char* pre = munit_maybe_concat(&pre_l, (char*) prefix, (char*) suite->prefix); const MunitTest* test; const MunitParameterEnum* params; munit_bool first; MunitParameterValue* val; const MunitSuite* child_suite; for (const MunitTest* tests_suite = suite->tests_suites[0]; tests_suite != NULL; tests_suite++) { for (test = tests_suite ; test != NULL && test->test != NULL ; test++) { if (pre != NULL) fputs(pre, stdout); puts(test->name); if (show_params) { for (params = test->parameters ; params != NULL && params->name != NULL ; params++) { fprintf(stdout, " - %s: ", params->name); if (params->values == NULL) { puts("Any"); } else { first = 1; for (val = params->values ; val->name != NULL ; val++ ) { if(!first) { fputs(", ", stdout); } else { first = 0; } fputs(val->name, stdout); } putc('\n', stdout); } } } } } munit_maybe_free_concat(pre, prefix, suite->prefix); } static munit_bool munit_stream_supports_ansi(FILE *stream) { #if !defined(_WIN32) return isatty(fileno(stream)); #else #if !defined(__MINGW32__) size_t ansicon_size = 0; #endif if (isatty(fileno(stream))) { #if !defined(__MINGW32__) getenv_s(&ansicon_size, NULL, 0, "ANSICON"); return ansicon_size != 0; #else return getenv("ANSICON") != NULL; #endif } return 0; #endif } int 
munit_suite_main_custom(const MunitSuite* suite, void* user_data, int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)], const MunitArgument arguments[]) { int result = EXIT_FAILURE; MunitTestRunner runner; size_t parameters_size = 0; size_t tests_size = 0; int arg; char* envptr; unsigned long ts; char* endptr; unsigned long long iterations; MunitLogLevel level; const MunitArgument* argument; const char** runner_tests; unsigned int tests_run; unsigned int tests_total; runner.prefix = NULL; runner.suite = NULL; runner.tests = NULL; runner.seed = 0; runner.iterations = 0; runner.parameters = NULL; runner.single_parameter_mode = 0; runner.user_data = NULL; runner.report.successful = 0; runner.report.skipped = 0; runner.report.failed = 0; runner.report.errored = 0; #if defined(MUNIT_ENABLE_TIMING) runner.report.cpu_clock = 0; runner.report.wall_clock = 0; #endif runner.colorize = 0; #if !defined(_WIN32) runner.fork = 1; #else runner.fork = 0; #endif runner.show_stderr = 0; runner.fatal_failures = 0; runner.suite = suite; runner.user_data = user_data; runner.seed = munit_rand_generate_seed(); runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE); for (arg = 1 ; arg < argc ; arg++) { if (strncmp("--", argv[arg], 2) == 0) { if (strcmp("seed", argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } envptr = argv[arg + 1]; ts = strtoul(argv[arg + 1], &envptr, 0); if (*envptr != '\0' || ts > (~((munit_uint32_t) 0U))) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } runner.seed = (munit_uint32_t) ts; arg++; } else if (strcmp("iterations", argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } endptr = argv[arg + 1]; iterations = strtoul(argv[arg + 1], &endptr, 0); if (*endptr != '\0' || iterations > UINT_MAX) 
{ munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } runner.iterations = (unsigned int) iterations; arg++; } else if (strcmp("param", argv[arg] + 2) == 0) { if (arg + 2 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires two arguments", argv[arg]); goto cleanup; } runner.parameters = realloc(runner.parameters, sizeof(MunitParameter) * (parameters_size + 2)); if (runner.parameters == NULL) { munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory"); goto cleanup; } runner.parameters[parameters_size].name = (char*) argv[arg + 1]; runner.parameters[parameters_size].value = (MunitParameterValue)munit_parameter_unspecified(argv[arg + 2]); parameters_size++; runner.parameters[parameters_size].name = NULL; runner.parameters[parameters_size].value = (MunitParameterValue)MUNIT_END_PARAMETER_VALUE; arg += 2; } else if (strcmp("color", argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } if (strcmp(argv[arg + 1], "always") == 0) runner.colorize = 1; else if (strcmp(argv[arg + 1], "never") == 0) runner.colorize = 0; else if (strcmp(argv[arg + 1], "auto") == 0) runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE); else { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } arg++; } else if (strcmp("help", argv[arg] + 2) == 0) { munit_print_help(argc, argv, user_data, arguments); result = EXIT_SUCCESS; goto cleanup; } else if (strcmp("single", argv[arg] + 2) == 0) { runner.single_parameter_mode = 1; } else if (strcmp("show-stderr", argv[arg] + 2) == 0) { runner.show_stderr = 1; #if !defined(_WIN32) } else if (strcmp("no-fork", argv[arg] + 2) == 0) { runner.fork = 0; #endif } else if (strcmp("fatal-failures", argv[arg] + 2) == 0) { runner.fatal_failures = 1; } else if (strcmp("log-visible", argv[arg] + 
2) == 0 || strcmp("log-fatal", argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } if (strcmp(argv[arg + 1], "debug") == 0) level = MUNIT_LOG_DEBUG; else if (strcmp(argv[arg + 1], "info") == 0) level = MUNIT_LOG_INFO; else if (strcmp(argv[arg + 1], "warning") == 0) level = MUNIT_LOG_WARNING; else if (strcmp(argv[arg + 1], "error") == 0) level = MUNIT_LOG_ERROR; else { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } if (strcmp("log-visible", argv[arg] + 2) == 0) munit_log_level_visible = level; else munit_log_level_fatal = level; arg++; } else if (strcmp("list", argv[arg] + 2) == 0) { munit_suite_list_tests(suite, 0, NULL); result = EXIT_SUCCESS; goto cleanup; } else if (strcmp("list-params", argv[arg] + 2) == 0) { munit_suite_list_tests(suite, 1, NULL); result = EXIT_SUCCESS; goto cleanup; } else { argument = munit_arguments_find(arguments, argv[arg] + 2); if (argument == NULL) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "unknown argument ('%s')", argv[arg]); goto cleanup; } if (!argument->parse_argument(suite, user_data, &arg, argc, argv)) goto cleanup; } } else { runner_tests = realloc((void*) runner.tests, sizeof(char*) * (tests_size + 2)); if (runner_tests == NULL) { munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory"); goto cleanup; } runner.tests = runner_tests; runner.tests[tests_size++] = argv[arg]; runner.tests[tests_size] = NULL; } } fflush(stderr); fprintf(MUNIT_OUTPUT_FILE, "Running test suite with seed 0x%08" PRIx32 "...\n", runner.seed); munit_test_runner_run(&runner); tests_run = runner.report.successful + runner.report.failed + runner.report.errored; tests_total = tests_run + runner.report.skipped; if (tests_run == 0) { fprintf(stderr, "No tests run, %d (100%%) skipped.\n", runner.report.skipped); } else { fprintf(MUNIT_OUTPUT_FILE, "%d of %d (%0.0f%%) tests 
successful, %d (%0.0f%%) test skipped.\n", runner.report.successful, tests_run, (((double) runner.report.successful) / ((double) tests_run)) * 100.0, runner.report.skipped, (((double) runner.report.skipped) / ((double) tests_total)) * 100.0); } if (runner.report.failed == 0 && runner.report.errored == 0) { result = EXIT_SUCCESS; } cleanup: free(runner.parameters); free((void*) runner.tests); return result; } int munit_suite_main(const MunitSuite* suite, void* user_data, int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)]) { return munit_suite_main_custom(suite, user_data, argc, argv, NULL); }
/* ===== begin file: resample_utils.h ===== */
/* Copyright 2020 - 2021 MONAI Consortium Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */
#pragma once

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// We need to define AT_PARALLEL_OPENMP (even if -fopenmp is
// not used) so that at::parallel_for is defined somewhere.
// This must be done before <ATen/Parallel.h> is included.
//
// Note that if AT_PARALLEL_OPENMP = 1 but compilation does not use
// -fopenmp, omp pragmas will be ignored. In that case, the code will
// be effectively sequential, and we don't have to worry about
// operations being atomic.
#if !(AT_PARALLEL_OPENMP)
#if !(AT_PARALLEL_NATIVE)
#if !(AT_PARALLEL_NATIVE_TBB)
#error No parallel backend specified
#endif
#endif
#endif
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// These are defines that help writing generic code for both GPU and CPU
#ifdef __CUDACC__
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <THC/THCAtomics.cuh>
#define MONAI_INLINE __forceinline__
#define MONAI_DEVICE __device__
#define MONAI_HOST __host__
#define MONAI_ATOMIC_ADD monai::gpuAtomicAdd
#define MONAI_NAMESPACE_DEVICE namespace cuda
namespace monai {
// atomicAdd API changed between pytorch 1.4 and 1.5.
// Device-side atomic add at ptr[offset]: dispatches to the torch-provided
// ::gpuAtomicAdd on torch >= 1.5, and to plain CUDA ::atomicAdd otherwise.
template <typename scalar_t, typename offset_t>
static __forceinline__ __device__ void gpuAtomicAdd(scalar_t* ptr, offset_t offset, scalar_t value) {
#if MONAI_TORCH_VERSION >= 10500
  ::gpuAtomicAdd(ptr + offset, value);
#else
  ::atomicAdd(ptr + offset, value);
#endif
}
} // namespace monai
#else
#define MONAI_INLINE inline
#define MONAI_DEVICE
#define MONAI_HOST
#define MONAI_ATOMIC_ADD monai::cpuAtomicAdd
#define MONAI_NAMESPACE_DEVICE namespace cpu
namespace monai {
// CPU counterpart of gpuAtomicAdd: accumulate value into ptr[offset].
// Only guarded with an omp atomic when the OpenMP backend is both
// selected (AT_PARALLEL_OPENMP) and actually compiled in (_OPENMP);
// otherwise execution is effectively sequential (see note above).
template <typename scalar_t, typename offset_t>
static inline void cpuAtomicAdd(scalar_t* ptr, offset_t offset, scalar_t value) {
#if AT_PARALLEL_OPENMP
#if _OPENMP
#pragma omp atomic
#endif
#endif
  ptr[offset] += value;
}
} // namespace monai
#endif
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

#include <ATen/ATen.h>

namespace monai {

// Out-of-bounds handling modes for resampling coordinates.
enum class BoundType : int64_t {
  Replicate, // Replicate last inbound value = clip coordinates
  DCT1, // Symmetric w.r.t. center of the last inbound voxel
  DCT2, // Symmetric w.r.t. edge of the last inbound voxel (=Neuman)
  DST1, // Asymmetric w.r.t. center of the last inbound voxel
  DST2, // Asymmetric w.r.t. edge of the last inbound voxel (=Dirichlet)
  DFT, // Circular / Wrap around the FOV
  Sliding, // For deformation-fields only: mixture of DCT2 and DST2
  Zero, // Zero outside of the FOV
  NoCheck // /!\ Checks disabled: assume coordinates are inbound
};

using BoundVectorRef = c10::ArrayRef<BoundType>;

// Spline interpolation orders (Nearest = order 0 ... SeventhOrder = 7).
enum class InterpolationType : int64_t {
  Nearest,
  Linear,
  Quadratic,
  Cubic,
  FourthOrder,
  FifthOrder,
  SixthOrder,
  SeventhOrder
};

using InterpolationVectorRef = c10::ArrayRef<InterpolationType>;

// Human-readable name of a BoundType for logging / error messages.
static MONAI_INLINE MONAI_HOST std::ostream& operator<<(std::ostream& os, const BoundType& bound) {
  switch (bound) {
    case BoundType::Replicate:
      return os << "Replicate";
    case BoundType::DCT1:
      return os << "DCT1";
    case BoundType::DCT2:
      return os << "DCT2";
    case BoundType::DST1:
      return os << "DST1";
    case BoundType::DST2:
      return os << "DST2";
    case BoundType::DFT:
      return os << "DFT";
    case BoundType::Zero:
      return os << "Zero";
    case BoundType::Sliding:
      return os << "Sliding";
    case BoundType::NoCheck:
      return os << "NoCheck";
  }
  // Unreachable for valid enum values; kept as a defensive fallback.
  return os << "Unknown bound";
}

// Human-readable name of an InterpolationType for logging / error messages.
static MONAI_INLINE MONAI_HOST std::ostream& operator<<(std::ostream& os, const InterpolationType& itp) {
  switch (itp) {
    case InterpolationType::Nearest:
      return os << "Nearest";
    case InterpolationType::Linear:
      return os << "Linear";
    case InterpolationType::Quadratic:
      return os << "Quadratic";
    case InterpolationType::Cubic:
      return os << "Cubic";
    case InterpolationType::FourthOrder:
      return os << "FourthOrder";
    case InterpolationType::FifthOrder:
      return os << "FifthOrder";
    case InterpolationType::SixthOrder:
      return os << "SixthOrder";
    case InterpolationType::SeventhOrder:
      return os << "SeventhOrder";
  }
  // Unreachable for valid enum values; kept as a defensive fallback.
  return os << "Unknown interpolation order";
}

} // namespace monai
tensor_cpu-inl.h
/*! * Copyright (c) 2014 by Contributors * \file tensor_cpu-inl.h * \brief implementation of CPU host code * \author Bing Xu, Tianqi Chen */ #ifndef MSHADOW_TENSOR_CPU_INL_H_ #define MSHADOW_TENSOR_CPU_INL_H_ #include <cstring> #include <functional> #include <utility> #include <vector> #include "./base.h" #include "./tensor.h" #include "./packet-inl.h" #include "./dot_engine-inl.h" namespace mshadow { template<> inline void InitTensorEngine<cpu>(int dev_id) { } template<> inline void ShutdownTensorEngine<cpu>(void) { } template<> inline void SetDevice<cpu>(int devid) { } template<> inline Stream<cpu> *NewStream<cpu>(bool create_blas_handle, bool create_dnn_handle, int dev_id) { return new Stream<cpu>(); } template<> inline void DeleteStream<cpu>(Stream<cpu> *stream) { delete stream; } template<int ndim> inline std::ostream &operator<<(std::ostream &os, const Shape<ndim> &shape) { // NOLINT(*) os << '('; for (int i = 0; i < ndim; ++i) { if (i != 0) os << ','; os << shape[i]; } // python style tuple if (ndim == 1) os << ','; os << ')'; return os; } template<typename xpu> inline void *AllocHost_(size_t size); template<typename xpu> inline void FreeHost_(void * dptr); #ifdef __CUDACC__ template<> inline void *AllocHost_<gpu>(size_t size) { void *dptr; MSHADOW_CUDA_CALL(cudaMallocHost(&dptr, size, cudaHostAllocPortable)); return dptr; } template<> inline void FreeHost_<gpu>(void *dptr) { MSHADOW_CUDA_CALL(cudaFreeHost(dptr)); } #endif template<> inline void *AllocHost_<cpu>(size_t size) { size_t pitch; return packet::AlignedMallocPitch(&pitch, size, 1); } template<> inline void FreeHost_<cpu>(void *dptr) { packet::AlignedFree(dptr); } template<typename xpu, int dim, typename DType> inline void AllocHost(Tensor<cpu, dim, DType> *obj) { obj->stride_ = obj->size(dim - 1); CHECK_EQ(obj->CheckContiguous(), true) << "AllocHost"; void *dptr = AllocHost_<xpu>(obj->MSize() * sizeof(DType)); obj->dptr_ = reinterpret_cast<DType*>(dptr); } template<typename xpu, int dim, typename 
DType> inline void FreeHost(Tensor<cpu, dim, DType> *obj) { if (obj->dptr_ == NULL) { LOG(FATAL) << "FreeHost:: double free"; } FreeHost_<xpu>(obj->dptr_); obj->dptr_ = NULL; } template<int dim, typename DType> inline void AllocSpace(Tensor<cpu, dim, DType> *obj, bool pad) { size_t pitch; void *dptr; if (pad) { dptr = packet::AlignedMallocPitch (&pitch, obj->size(dim - 1) * sizeof(DType), obj->shape_.FlatTo2D()[0]); obj->stride_ = static_cast<index_t>(pitch / sizeof(DType)); } else { obj->stride_ = obj->size(dim - 1); dptr = packet::AlignedMallocPitch (&pitch, obj->shape_.Size() * sizeof(DType), 1); } obj->dptr_ = reinterpret_cast<DType*>(dptr); } template<typename Device, typename DType, int dim> inline Tensor<Device, dim, DType> NewTensor(const Shape<dim> &shape, DType initv, bool pad, Stream<Device> *stream_) { Tensor<Device, dim, DType> obj(shape); obj.stream_ = stream_; AllocSpace(&obj, pad); MapExp<sv::saveto>(&obj, expr::ScalarExp<DType>(initv)); return obj; } template<int dim, typename DType> inline void FreeSpace(Tensor<cpu, dim, DType> *obj) { packet::AlignedFree(obj->dptr_); obj->dptr_ = NULL; } template<int dim, typename DType> inline void Copy(Tensor<cpu, dim, DType> _dst, const Tensor<cpu, dim, DType> &_src, Stream<cpu> *stream) { #pragma GCC diagnostic push #if __GNUC__ >= 8 #pragma GCC diagnostic ignored "-Wclass-memaccess" #endif CHECK_EQ(_dst.shape_, _src.shape_) << "Copy:shape mismatch:" << _dst.shape_ << " vs " << _src.shape_; if (_dst.CheckContiguous() && _src.CheckContiguous()) { memcpy(_dst.dptr_, _src.dptr_, sizeof(DType) * _dst.shape_.Size()); } else { Tensor<cpu, 2, DType> dst = _dst.FlatTo2D(); Tensor<cpu, 2, DType> src = _src.FlatTo2D(); for (index_t y = 0; y < dst.size(0); ++y) { memcpy(dst[y].dptr_, src[y].dptr_, sizeof(DType) * dst.size(1)); } } #pragma GCC diagnostic pop } template<typename Saver, typename R, int dim, typename DType, typename E> inline void MapPlan(TRValue<R, cpu, dim, DType> *dst, const expr::Plan<E, DType> &plan) { 
Shape<2> shape = expr::ShapeCheck<dim, R>::Check(dst->self()).FlatTo2D(); expr::Plan<R, DType> dplan = expr::MakePlan(dst->self()); #ifndef __CUDACC__ #pragma omp parallel for #endif // temp remove openmp, as default setting throttles CPU for (openmp_index_t y = 0; y < shape[0]; ++y) { for (index_t x = 0; x < shape[1]; ++x) { // trust your compiler! -_- they will optimize it Saver::template Save<DType>(dplan.REval(y, x), plan.Eval(y, x)); } } } // code to handle SSE optimization template<bool pass_check, typename Saver, typename R, int dim, typename DType, typename E, int etype> struct MapExpCPUEngine { inline static void Map(TRValue<R, cpu, dim, DType> *dst, const expr::Exp<E, DType, etype> &exp) { MapPlan<Saver>(dst, MakePlan(exp.self())); } }; template<typename SV, int dim, typename DType, typename E, int etype> struct MapExpCPUEngine<true, SV, Tensor<cpu, dim, DType>, dim, DType, E, etype> { inline static void Map(Tensor<cpu, dim, DType> *dst, const expr::Exp<E, DType, etype> &exp) { if (expr::PacketAlignCheck<dim, E, MSHADOW_DEFAULT_PACKET>::Check(exp.self()) && expr::PacketAlignCheck<dim, Tensor<cpu, dim, DType>, MSHADOW_DEFAULT_PACKET>::Check(*dst)) { expr::MapPacketPlan<SV>(dst->self(), expr::MakePacketPlan<MSHADOW_DEFAULT_PACKET>(exp.self())); } else { MapPlan<SV>(dst, MakePlan(exp.self())); } } }; template<typename Saver, typename R, int dim, typename DType, typename E, int etype> inline void MapExp(TRValue<R, cpu, dim, DType> *dst, const expr::Exp<E, DType, etype> &exp) { expr::TypeCheckPass<expr::TypeCheck<cpu, dim, DType, E>::kMapPass> ::Error_All_Tensor_in_Exp_Must_Have_Same_Type(); Shape<dim> eshape = expr::ShapeCheck<dim, E>::Check(exp.self()); Shape<dim> dshape = expr::ShapeCheck<dim, R>::Check(dst->self()); CHECK(eshape[0] == 0 || eshape == dshape) << "Assignment: Shape of Tensors are not consistent with target, " << "eshape: " << eshape << " dshape:" << dshape; MapExpCPUEngine<expr::PacketCheck<E, MSHADOW_DEFAULT_PACKET>::kPass, Saver, R, dim, 
DType, E, etype> ::Map(dst->ptrself(), exp); } template<typename Saver, typename Reducer, typename R, typename DType, typename E, int etype> inline void MapReduceKeepLowest(TRValue<R, cpu, 1, DType> *dst, const expr::Exp<E, DType, etype> &exp, DType scale) { expr::TypeCheckPass<expr::TypeCheck<cpu, 1, DType, E>::kRedPass> ::Error_TypeCheck_Not_Pass_For_Reduce_Exp(); Shape<2> eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E> ::Check(exp.self()).FlatTo2D(); Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self()); CHECK_EQ(eshape[1], dshape[0]) << "MapReduceKeepLowest::reduction dimension do not match"; CHECK_NE(eshape[0], 0U) << "can not reduce over empty tensor"; // execution expr::Plan<R, DType> dplan = MakePlan(dst->self()); expr::Plan<E, DType> splan = MakePlan(exp.self()); #ifndef __CUDACC__ #pragma omp parallel for #endif for (openmp_index_t x = 0; x < eshape[1]; ++x) { DType res = splan.Eval(0, x); for (index_t y = 1; y < eshape[0]; ++y) { Reducer::Reduce(res, splan.Eval(y, x)); } Saver::template Save<DType>(dplan.REval(0, x), res * scale); } } template<typename Saver, typename Reducer, int dimkeep, typename R, typename DType, typename E, int etype> inline void MapReduceKeepHighDim(TRValue<R, cpu, 1, DType> *dst, const expr::Exp<E, DType, etype> &exp, DType scale) { expr::TypeCheckPass<expr::TypeCheck<cpu, dimkeep, DType, E>::kRedPass> ::Error_TypeCheck_Not_Pass_For_Reduce_Exp(); typedef Shape<expr::ExpInfo<E>::kDim> EShape; EShape eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E> ::Check(exp.self()); Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self()); CHECK_EQ(eshape[dimkeep], dshape[0]) << "MapReduceKeepHighDim::reduction dimension do not match"; // use equvalent form Shape<4> pshape = Shape4(eshape.ProdShape(0, dimkeep), eshape[dimkeep], eshape.ProdShape(dimkeep + 1, EShape::kSubdim), eshape[EShape::kSubdim]); // execution expr::Plan<R, DType> dplan = MakePlan(dst->self()); expr::Plan<E, DType> splan = MakePlan(exp.self()); #ifndef 
__CUDACC__ #pragma omp parallel for #endif for (openmp_index_t c = 0; c < pshape[1]; ++c) { DType res; Reducer::SetInitValue(res); for (index_t n = 0; n < pshape[0]; ++n) { DType tres; Reducer::SetInitValue(tres); for (index_t y = 0; y < pshape[2]; ++y) { for (index_t x = 0; x < pshape[3]; ++x) { Reducer::Reduce(tres, splan.Eval((n * pshape[1] + c) * pshape[2] + y, x)); } } Reducer::Reduce(res, tres); } Saver::template Save<DType>(dplan.REval(0, c), DType(res * scale)); } } template<typename DType> inline void Softmax(Tensor<cpu, 1, DType> dst, const Tensor<cpu, 1, DType> &energy) { DType mmax = energy[0]; for (index_t x = 1; x < dst.size(0); ++x) { if (mmax < energy[x]) mmax = energy[x]; } DType sum = DType(0.0f); for (index_t x = 0; x < dst.size(0); ++x) { dst[x] = std::exp(energy[x] - mmax); sum += dst[x]; } for (index_t x = 0; x < dst.size(0); ++x) { dst[x] /= sum; } } template<typename DType> inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst, const Tensor<cpu, 2, DType> &src, const Tensor<cpu, 1, DType> &label) { #pragma omp parallel for for (openmp_index_t y = 0; y < dst.size(0); ++y) { const index_t k = static_cast<int>(label[y]); for (index_t x = 0; x < dst.size(1); ++x) { if (x == k) { dst[y][k] = src[y][k] - 1.0f; } else { dst[y][x] = src[y][x]; } } } } template<typename DType> inline void SmoothSoftmaxGrad(Tensor<cpu, 2, DType> dst, const Tensor<cpu, 2, DType> &src, const Tensor<cpu, 1, DType> &label, const float alpha) { const float smooth_grad = (alpha / (dst.size(1) - 1)); #pragma omp parallel for for (openmp_index_t y = 0; y < dst.size(0); ++y) { const index_t k = static_cast<int>(label[y]); for (index_t x = 0; x < dst.size(1); ++x) { if (x == k) { dst[y][k] = src[y][k] - 1.0f + alpha; } else { dst[y][x] = src[y][x] - smooth_grad; } } } } template<typename DType> inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst, const Tensor<cpu, 2, DType> &src, const Tensor<cpu, 1, DType> &label, const DType &ignore_label) { #pragma omp parallel for for 
(openmp_index_t y = 0; y < dst.size(0); ++y) { const int k = static_cast<int>(label[y]); for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) { if (static_cast<int>(ignore_label) == k) { dst[y][x] = 0.0f; } else { if (x == k) { dst[y][k] = src[y][k] - 1.0f; } else { dst[y][x] = src[y][x]; } } } } } template<typename DType> inline void SmoothSoftmaxGrad(Tensor<cpu, 2, DType> dst, const Tensor<cpu, 2, DType> &src, const Tensor<cpu, 1, DType> &label, const DType &ignore_label, const float alpha) { const float smooth_grad = (alpha / (dst.size(1) - 1)); #pragma omp parallel for for (openmp_index_t y = 0; y < dst.size(0); ++y) { const int k = static_cast<int>(label[y]); for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) { if (static_cast<int>(ignore_label) == k) { dst[y][x] = 0.0f; } else { if (x == k) { dst[y][k] = src[y][k] - 1.0f + alpha; } else { dst[y][x] = src[y][x] - smooth_grad; } } } } } template<typename DType> inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst, const Tensor<cpu, 3, DType> &src, const Tensor<cpu, 2, DType> &label) { #pragma omp parallel for for (openmp_index_t n = 0; n < dst.size(2); ++n) { for (index_t y = 0; y < dst.size(0); ++y) { const int k = static_cast<int>(label[y][n]); for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) { if (x == k) { dst[y][k][n] = src[y][k][n] - 1.0f; } else { dst[y][x][n] = src[y][x][n]; } } } } } template<typename DType> inline void SmoothSoftmaxGrad(Tensor<cpu, 3, DType> dst, const Tensor<cpu, 3, DType> &src, const Tensor<cpu, 2, DType> &label, const float alpha) { const float smooth_grad = (alpha / (dst.size(1) - 1)); #pragma omp parallel for for (openmp_index_t n = 0; n < dst.size(2); ++n) { for (index_t y = 0; y < dst.size(0); ++y) { const int k = static_cast<int>(label[y][n]); for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) { if (x == k) { dst[y][k][n] = src[y][k][n] - 1.0f + alpha; } else { dst[y][x][n] = src[y][x][n] - smooth_grad; } } } } } template<typename DType> inline void 
SoftmaxGrad(Tensor<cpu, 3, DType> dst, const Tensor<cpu, 3, DType> &src, const Tensor<cpu, 2, DType> &label, const DType &ignore_label) { #pragma omp parallel for for (openmp_index_t n = 0; n < dst.size(2); ++n) { for (index_t y = 0; y < dst.size(0); ++y) { const int k = static_cast<int>(label[y][n]); if (k == static_cast<int>(ignore_label)) { for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) { dst[y][x][n] = DType(0.0f); } } else { for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) { if (x == k) { dst[y][k][n] = src[y][k][n] - 1.0f; } else { dst[y][x][n] = src[y][x][n]; } } } } } } template<typename DType> inline void SmoothSoftmaxGrad(Tensor<cpu, 3, DType> dst, const Tensor<cpu, 3, DType> &src, const Tensor<cpu, 2, DType> &label, const DType &ignore_label, const float alpha) { const float smooth_grad = (alpha / (dst.size(1) - 1)); #pragma omp parallel for for (openmp_index_t n = 0; n < dst.size(2); ++n) { for (index_t y = 0; y < dst.size(0); ++y) { const int k = static_cast<int>(label[y][n]); if (k == static_cast<int>(ignore_label)) { for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) { dst[y][x][n] = DType(0.0f); } } else { for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) { if (x == k) { dst[y][k][n] = src[y][k][n] - 1.0f + alpha; } else { dst[y][x][n] = src[y][x][n] - smooth_grad; } } } } } } template<typename DType> inline void Softmax(Tensor<cpu, 2, DType> dst, const Tensor<cpu, 2, DType> &energy) { CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch"; #pragma omp parallel for for (openmp_index_t y = 0; y < dst.size(0); ++y) { Softmax(dst[y], energy[y]); } } template<typename DType> inline void Softmax(Tensor<cpu, 3, DType> dst, const Tensor<cpu, 3, DType> &energy) { CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch"; #pragma omp parallel for for (openmp_index_t y = 0; y < dst.size(0); ++y) { for (index_t n = 0; n < dst.size(2); ++n) { DType mmax = energy[y][0][n]; for (index_t x = 1; x < dst.size(1); ++x) { if 
(mmax < energy[y][x][n]) mmax = energy[y][x][n]; } DType sum = DType(0.0f); for (index_t x = 0; x < dst.size(1); ++x) { dst[y][x][n] = std::exp(energy[y][x][n] - mmax); sum += dst[y][x][n]; } for (index_t x = 0; x < dst.size(1); ++x) { dst[y][x][n] /= sum; } } } } template<bool clip, typename IndexType, typename DType> inline void AddTakeGrad(Tensor<cpu, 2, DType> dst, const Tensor<cpu, 1, IndexType>& index, const Tensor<cpu, 2, DType> &src) { const index_t K = dst.shape_[0]; const index_t C = dst.shape_[1]; for (index_t y = 0; y < index.size(0); ++y) { index_t j = index[y]; if (clip) { if (j <= 0) j = 0; else if (j >= K) j = K - 1; } else { j %= K; if (j < 0) j += K; } for (index_t i = 0; i < C; ++i) { dst[j][i] += src[y][i]; } } } template<typename IndexType, typename DType> inline void AddTakeGradLargeBatch(Tensor<cpu, 2, DType> dst, const Tensor<cpu, 1, IndexType>& sorted, const Tensor<cpu, 1, IndexType>& index, const Tensor<cpu, 2, DType> &src) { for (index_t y = 0; y < sorted.size(0); ++y) { dst[sorted[y]] += src[index[y]]; } } template<typename IndexType, typename DType> inline void IndexFill(Tensor<cpu, 2, DType> dst, const Tensor<cpu, 1, IndexType>& index, const Tensor<cpu, 2, DType> &src) { for (index_t y = 0; y < index.size(0); ++y) { for (index_t j = 0; j < src.size(1); j++) { dst[index[y]][j] = src[y][j]; } } } template<typename KDType, typename VDType> inline void SortByKey(Tensor<cpu, 1, KDType> keys, Tensor<cpu, 1, VDType> values, bool is_ascend) { CHECK_EQ(keys.CheckContiguous(), true); CHECK_EQ(values.CheckContiguous(), true); CHECK_EQ(keys.size(0), values.size(0)) << "The sizes of key/value are not equal! 
keys_size: " << keys.size(0) << "values_size: " << values.size(0); std::vector<size_t> idx(keys.size(0)); std::vector<KDType> keys_vec(keys.size(0)); std::vector<VDType> values_vec(values.size(0)); for (int i = 0; i < keys.size(0); i++) { idx[i] = i; keys_vec[i] = keys[i]; values_vec[i] = values[i]; } if (is_ascend) { std::stable_sort(idx.begin(), idx.end(), [&keys_vec](size_t i1, size_t i2) {return keys_vec[i1] < keys_vec[i2]; }); } else { std::stable_sort(idx.begin(), idx.end(), [&keys_vec](size_t i1, size_t i2) {return keys_vec[i1] > keys_vec[i2]; }); } for (index_t i = 0; i < values.size(0); i++) { keys[i] = keys_vec[idx[i]]; values[i] = values_vec[idx[i]]; } } template<typename Device, typename VDType, typename SDType> inline void VectorizedSort(Tensor<Device, 1, VDType> values, Tensor<Device, 1, SDType> segments) { // We can sort each segments using two stable sorts SortByKey(values, segments, true); SortByKey(segments, values, true); } // blas related template<typename Device, typename DType> inline void VectorDot(Tensor<Device, 1, DType> dst, const Tensor<Device, 1, DType> &lhs, const Tensor<Device, 1, DType> &rhs) { CHECK_EQ(lhs.size(0), rhs.size(0)) << "VectorDot: Shape mismatch"; CHECK_EQ(dst.size(0), 1U) << "VectorDot: expect dst to be scalar"; expr::BLASEngine<Device, DType>::SetStream(lhs.stream_); mshadow::expr::BLASEngine<Device, DType>::dot( lhs.stream_, lhs.size(0), lhs.dptr_, 1, rhs.dptr_, 1, dst.dptr_); } template<bool transpose_left, bool transpose_right, typename Device, typename DType> inline void BatchGEMM(Tensor<Device, 3, DType> dst, const Tensor<Device, 3, DType> &lhs, const Tensor<Device, 3, DType> &rhs, DType alpha, DType beta, Tensor<Device, 1, DType*> workspace) { index_t batch_size = dst.shape_[0]; expr::BLASEngine<Device, DType>::SetStream(dst.stream_); Shape<3> sleft = transpose_left ? Shape3(lhs.shape_[0], lhs.shape_[2], lhs.shape_[1]) : lhs.shape_; Shape<3> sright = transpose_right ? 
Shape3(rhs.shape_[0], rhs.shape_[2], rhs.shape_[1]) : rhs.shape_; CHECK_EQ(dst.CheckContiguous(), true); CHECK_EQ(lhs.CheckContiguous(), true); CHECK_EQ(rhs.CheckContiguous(), true); CHECK(sleft[0] == batch_size && sright[0] == batch_size) << "BatchGEMM: batchsize must be equal." << "dst: " << dst.shape_ << "\n" << "lhs: " << sleft << "\n" << "rhs: " << sright << "\n"; CHECK(dst.size(1) == sleft[1] && dst.size(2) == sright[2] && sleft[2] == sright[1]) << "BatchGEMM: matrix shape mismatch" << "dst: " << dst.shape_ << "\n" << "lhs: " << sleft << "\n" << "rhs: " << sright << "\n"; CHECK(workspace.size(0) >= 3 * batch_size) << "Workspace Size must be bigger than " << 3 * batch_size; CHECK_EQ(workspace.CheckContiguous(), true); // use column major argument to compatible with most BLAS expr::BLASEngine<Device, DType>::batched_gemm (dst.stream_, transpose_right, transpose_left, transpose_right ? rhs.size(1) : rhs.size(2), transpose_left ? lhs.size(2) : lhs.size(1), transpose_right ? rhs.size(2) : rhs.size(1), alpha, rhs.dptr_, rhs.stride_, lhs.dptr_, lhs.stride_, beta, dst.dptr_, dst.stride_, batch_size, workspace.dptr_); } } // namespace mshadow #endif // MSHADOW_TENSOR_CPU_INL_H_
main.c
#include "modules/api.h"
#include "core/core.h"
#define STB_DXT_IMPLEMENTATION
#include "stb_dxt.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h> /* link()/unlink() were previously called without a declaration */
#include <zlib.h>

/* Compute the input region of interest for this sink: scale the full
 * image down to the requested output size and snap the result to a
 * multiple of 4 so the buffer decomposes into whole bc1 blocks. */
void modify_roi_in(
    dt_graph_t  *graph,
    dt_module_t *mod)
{
  dt_roi_t *r = &mod->connector[0].roi;
  /* Scale to fit into the requested roi. The original code divided by
   * both output_wd and output_ht whenever *either* was > 0 (||), which
   * produced an inf scale — and a 0x0 output — when only one dimension
   * was requested. Guard each dimension individually instead; when both
   * are set the behavior is unchanged. */
  r->scale = 1.0f;
  if(graph->output_wd > 0 && graph->output_ht > 0)
    r->scale = MAX(
        r->full_wd / (float) graph->output_wd,
        r->full_ht / (float) graph->output_ht);
  else if(graph->output_wd > 0)
    r->scale = r->full_wd / (float) graph->output_wd;
  else if(graph->output_ht > 0)
    r->scale = r->full_ht / (float) graph->output_ht;
  r->wd = r->full_wd/r->scale;
  r->ht = r->full_ht/r->scale;
  r->wd = (r->wd/4)*4; // make sure we have bc1 blocks aligned
  r->ht = (r->ht/4)*4;
}

/* Called after the pipeline finished up to here. The input buffer comes
 * in memory mapped; compress it to bc1 and write a gzip'ed "bc1z" file
 * (magic, version, width, height header followed by the block data).
 * The file is replaced near-atomically via a .temp file + link(). */
void write_sink(
    dt_module_t *module,
    void *buf)
{
  const char *filename = dt_module_param_string(module, 0);
  // fprintf(stderr, "[o-bc1] writing '%s'\n", filename);
  const uint32_t wd = module->connector[0].roi.wd;
  const uint32_t ht = module->connector[0].roi.ht;
  const uint8_t *in = (const uint8_t *)buf;

  // go through all 4x4 blocks.
  // parallelise via our thread pool or openmp or what?
  // probably usually bc1 thumbnails are too small to warrant a good speedup.
  const int bx = wd/4, by = ht/4;
  size_t num_blocks = (size_t)bx * by;
  uint8_t *out = malloc(sizeof(uint8_t)*8*num_blocks);
  if(!out) return; // was unchecked: OOM would have crashed in the loop below
  // #pragma omp parallel for collapse(2) schedule(static)
  for(int j=0;j<4*by;j+=4)
  {
    for(int i=0;i<4*bx;i+=4)
    {
      // swizzle block data together: 4x4 pixels, 4 bytes each
      uint8_t block[64];
      for(int jj=0;jj<4;jj++)
        for(int ii=0;ii<4;ii++)
          for(int c=0;c<4;c++)
            block[4*(4*jj+ii)+c] = in[4*(wd*(j+jj)+(i+ii))+c];
      stb_compress_dxt_block(
          out + 8*(bx*(j/4)+(i/4)),
          block, 0, 0); // or slower: STB_DXT_HIGHQUAL
    }
  }

  char tmpfile[1024];
  snprintf(tmpfile, sizeof(tmpfile), "%s.temp", filename);
  gzFile f = gzopen(tmpfile, "wb");
  if(!f) // was unchecked: gzwrite on a NULL gzFile is undefined
  {
    free(out);
    return;
  }
  // write magic, version, width, height
  uint32_t header[4] = { dt_token("bc1z"), 1, wd, ht };
  gzwrite(f, header, sizeof(uint32_t)*4);
  gzwrite(f, out, sizeof(uint8_t)*8*num_blocks);
  gzclose(f);
  free(out);

  // atomically create filename only when we're quite done writing:
  unlink(filename); // just to be sure the link will work
  link(tmpfile, filename);
  unlink(tmpfile);
}
convect_particles_utilities.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Pablo Becker // // #if !defined(KRATOS_CONVECT_PARTICLES_UTILITIES_INCLUDED ) #define KRATOS_CONVECT_PARTICLES_UTILITIES_INCLUDED #define PRESSURE_ON_EULERIAN_MESH #define USE_FEW_PARTICLES // System includes #include <string> #include <iostream> #include <algorithm> // External includes // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/node.h" #include "utilities/geometry_utilities.h" #include "geometries/tetrahedra_3d_4.h" #include "includes/variables.h" #include "spatial_containers/spatial_containers.h" #include "utilities/timer.h" #include "processes/node_erase_process.h" #include "utilities/binbased_fast_point_locator.h" #include <boost/timer.hpp> #include "utilities/timer.h" #ifdef _OPENMP #include "omp.h" #endif namespace Kratos { template<std::size_t TDim> class ParticleConvectUtily { public: KRATOS_CLASS_POINTER_DEFINITION(ParticleConvectUtily<TDim>); ParticleConvectUtily(typename BinBasedFastPointLocator<TDim>::Pointer pSearchStructure) : mpSearchStructure(pSearchStructure) { } ~ParticleConvectUtily() { } //********************************************************************************************** //********************************************************************************************** ///this function moves all the nodes contained in rModelPart from their position at time tn to the one at time ///tn+1 by following the trajectories. 
This is done by performing "subdivions" forward euler steps within each time step ///@param rModelPart the model part on which we work ///@param subdivisions number of forward euler substeps used in advancing in time void MoveParticles_Substepping(ModelPart& rModelPart, unsigned int subdivisions) { KRATOS_TRY const double dt = rModelPart.GetProcessInfo()[DELTA_TIME]; const double small_dt = dt/ static_cast<double>(subdivisions); //do movement array_1d<double, 3 > veulerian; array_1d<double, 3 > acc_particle; Vector N(TDim + 1); const int max_results = rModelPart.Nodes().size(); typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rModelPart.Nodes().size(); #pragma omp parallel for firstprivate(results,N,veulerian,acc_particle) for (int i = 0; i < nparticles; i++) { unsigned int substep = 0; ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i; Node < 3 > ::Pointer pparticle = *(iparticle.base()); array_1d<double,3> current_position = iparticle->GetInitialPosition() + iparticle->FastGetSolutionStepValue(DISPLACEMENT,1); Element::Pointer pelement; bool is_found = false; array_1d<double, 3> aux_point_local_coordinates; while(substep++ < subdivisions) { typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); is_found = false; if(substep > 1 ) //first check if it falls within the same element { Geometry< Node < 3 > >& geom = pelement->GetGeometry(); is_found = geom.IsInside(current_position, aux_point_local_coordinates, 1.0e-5); geom.ShapeFunctionsValues(N, aux_point_local_coordinates); if(is_found == false) is_found = mpSearchStructure->FindPointOnMesh(current_position, N, pelement, result_begin, max_results); } else //if not found use the search structure { is_found = mpSearchStructure->FindPointOnMesh(current_position, N, pelement, result_begin, max_results); } (iparticle)->Set(TO_ERASE, true); if (is_found == true) { Geometry< Node < 3 > >& geom = 
pelement->GetGeometry(); const double new_step_factor = static_cast<double>(substep)/subdivisions; const double old_step_factor = 1.0 - new_step_factor; noalias(veulerian) = N[0] * ( new_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY,1)); for (unsigned int k = 1; k < geom.size(); k++) noalias(veulerian) += N[k] * ( new_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY,1) ); noalias(current_position) += small_dt*veulerian; (iparticle)->Set(TO_ERASE, false); } else break; } if (is_found == true) { iparticle->FastGetSolutionStepValue(DISPLACEMENT) = current_position - iparticle->GetInitialPosition(); noalias(pparticle->Coordinates()) = current_position; } } KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** ///this function moves the mesh as xn+1 = xn + vn*dt and sets the mesh velocity to vn ///@param rModelPart the model part on which we work void MoveParticles_RK4(ModelPart& rModelPart) { KRATOS_TRY const double dt = rModelPart.GetProcessInfo()[DELTA_TIME]; //do movement array_1d<double, 3 > v1,v2,v3,v4,vtot,x; Vector N(TDim + 1); const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rModelPart.Nodes().size(); #pragma omp parallel for firstprivate(results,N,v1,v2,v3,v4,vtot,x) for (int i = 0; i < nparticles; i++) { typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i; Node < 3 > ::Pointer pparticle = *(iparticle.base()); array_1d<double,3> initial_position = iparticle->GetInitialPosition() + iparticle->FastGetSolutionStepValue(DISPLACEMENT,1); Element::Pointer pelement; bool 
is_found = false; //STEP1 { is_found = mpSearchStructure->FindPointOnMesh(initial_position, N, pelement, result_begin, max_results); if( is_found == false) goto end_of_particle; Geometry< Node < 3 > >& geom = pelement->GetGeometry(); noalias(v1) = N[0] * ( geom[0].FastGetSolutionStepValue(VELOCITY,1)); for (unsigned int k = 1; k < geom.size(); k++) noalias(v1) += N[k] * ( geom[k].FastGetSolutionStepValue(VELOCITY,1) ); } //STEP2 // if(is_found == true) { noalias(x) = initial_position + (0.5*dt)*v1; is_found = mpSearchStructure->FindPointOnMesh(x, N, pelement, result_begin, max_results); if( is_found == false) goto end_of_particle; Geometry< Node < 3 > >& geom = pelement->GetGeometry(); const double new_step_factor = 0.5; const double old_step_factor = 0.5; noalias(v2) = N[0] * ( new_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY,1)); for (unsigned int k = 1; k < geom.size(); k++) noalias(v2) += N[k] * ( new_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY,1) ); } //STEP3 // if(is_found == true) { const array_1d<double,3> x = initial_position + (0.5*dt)*v2; is_found = mpSearchStructure->FindPointOnMesh(x, N, pelement, result_begin, max_results); if( is_found == false) goto end_of_particle; Geometry< Node < 3 > >& geom = pelement->GetGeometry(); const double new_step_factor = 0.5; //as the step before const double old_step_factor = 0.5; noalias(v3) = N[0] * ( new_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[0].FastGetSolutionStepValue(VELOCITY,1)); for (unsigned int k = 1; k < geom.size(); k++) noalias(v3) += N[k] * ( new_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY) + old_step_factor*geom[k].FastGetSolutionStepValue(VELOCITY,1) ); } //STEP4 // if(is_found == true) { const array_1d<double,3> x = initial_position + (dt)*v3; is_found = mpSearchStructure->FindPointOnMesh(x, N, pelement, 
result_begin, max_results); if( is_found == false) goto end_of_particle; Geometry< Node < 3 > >& geom = pelement->GetGeometry(); noalias(v4) = N[0] * ( geom[0].FastGetSolutionStepValue(VELOCITY)); for (unsigned int k = 1; k < geom.size(); k++) noalias(v4) += N[k] * ( geom[k].FastGetSolutionStepValue(VELOCITY) ); } (iparticle)->Set(TO_ERASE, false); //finalize step noalias(x) = initial_position; noalias(x) += 0.16666666666666666666667*dt*v1; noalias(x) += 0.33333333333333333333333*dt*v2; noalias(x) += 0.33333333333333333333333*dt*v3; noalias(x) += 0.16666666666666666666667*dt*v4; iparticle->FastGetSolutionStepValue(DISPLACEMENT) = x - iparticle->GetInitialPosition(); noalias(pparticle->Coordinates()) = x; end_of_particle: (iparticle)->Set(TO_ERASE, true); } KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** ///this function erases the elements and conditions which have at least one node marked for erase ///@param rModelPart the model part on which we work void EraseOuterElements(ModelPart& rModelPart) { KRATOS_TRY int nerased_el = 0; for(ModelPart::ElementsContainerType::iterator it = rModelPart.ElementsBegin(); it!=rModelPart.ElementsEnd(); it++) { Geometry< Node<3> >& geom = it->GetGeometry(); // bool erase_el = false; for(unsigned int i=0; i<geom.size(); i++) { if(geom[i].Is(TO_ERASE)) { it->Set(TO_ERASE,true); nerased_el++; break; } } } if(nerased_el > 0) { ModelPart::ElementsContainerType temp_elems_container; temp_elems_container.reserve(rModelPart.Elements().size() - nerased_el); temp_elems_container.swap(rModelPart.Elements()); for(ModelPart::ElementsContainerType::iterator it = temp_elems_container.begin() ; it != temp_elems_container.end() ; it++) { if( it->IsNot(TO_ERASE) ) (rModelPart.Elements()).push_back(*(it.base())); } } KRATOS_CATCH("") } private: typename 
BinBasedFastPointLocator<TDim>::Pointer mpSearchStructure; }; } // namespace Kratos. #endif // KRATOS_CONVECT_PARTICLES_UTILITIES_INCLUDED defined
ordered_processing.h
#ifndef ORDERED_PROCESSING_H_
#define ORDERED_PROCESSING_H_

#include "graph.h"
#include "eager_priority_queue.h"

// Sentinel bin index meaning "no bin selected yet". Half of SIZE_MAX so that
// comparisons/min() around it cannot overflow.
const size_t kMaxBin = numeric_limits<size_t>::max()/2;

// Relaxation functor for min-priority (delta-stepping style) updates:
// atomically lowers priorities_[dst] to new_val when it improves on old_val
// and, on success, drops dst into the thread-local bin that new_val maps to.
template <typename PriorityT_>
struct update_priority_min {
  void operator()(EagerPriorityQueue<PriorityT_>* pq, vector<vector<NodeID> >& local_bins, NodeID dst, PriorityT_ old_val, PriorityT_ new_val){
    if (new_val < old_val) {
      bool changed_dist = true;
      // CAS retry loop: another thread may have raced us; give up once the
      // published priority is already <= our proposed value.
      while (!compare_and_swap(pq->priorities_[dst], old_val, new_val)) {
        old_val = pq->priorities_[dst];
        if (old_val <= new_val) { changed_dist = false; break; }
      }
      if (changed_dist) {
        // assume the priority is mapped to a bin using delta
        size_t dest_bin;
        if (pq->delta_ != 1) dest_bin = new_val/pq->delta_;
        else dest_bin = new_val;  // skip the division in the common delta==1 case
        if (dest_bin >= local_bins.size()) {
          local_bins.resize(dest_bin+1);
        }
        local_bins[dest_bin].push_back(dst);
      }
    }
  }
};

// Bucket-ordered traversal WITHOUT intra-round merging: each round drains the
// shared frontier, edge_apply scatters updated vertices into per-thread bins,
// then the globally smallest non-empty bin is elected and copied out as the
// next frontier. while_cond() decides termination.
template< class Priority, class EdgeApplyFunc , class WhileCond>
void OrderedProcessingOperatorNoMerge(EagerPriorityQueue<Priority>* pq, const WGraph &g, WhileCond while_cond, EdgeApplyFunc edge_apply, NodeID optional_source_node){
  // Frontier sized for the worst case: every directed edge enqueues once.
  pvector<NodeID> frontier(g.num_edges_directed());
  // two element arrays for double buffering curr=iter&1, next=(iter+1)&1
  //size_t shared_indexes[2] = {0, kMaxBin};
  //size_t frontier_tails[2] = {1, 0};
  pq->init_indexes_tails();
  //optional source node
  frontier[0] = optional_source_node;

  #pragma omp parallel
  {
    vector<vector<NodeID> > local_bins(0);  // per-thread buckets, grown on demand
    size_t iter = 0;
    while (while_cond()) {
      //TODO: refactor to use user supplied
      // while (user_supplied_condition())
      // Double-buffered shared state now lives inside the priority queue:
      // size_t &curr_bin_index = shared_indexes[iter&1];
      // size_t &next_bin_index = shared_indexes[(iter+1)&1];
      // size_t &curr_frontier_tail = frontier_tails[iter&1];
      // size_t &next_frontier_tail = frontier_tails[(iter+1)&1];
      size_t &curr_bin_index = pq->shared_indexes[iter&1];
      size_t &next_bin_index = pq->shared_indexes[(iter+1)&1];
      size_t &curr_frontier_tail = pq->frontier_tails[iter&1];
      size_t &next_frontier_tail = pq->frontier_tails[(iter+1)&1];

      #pragma omp for nowait schedule(dynamic, 64)
      for (size_t i=0; i < curr_frontier_tail; i++) {
        NodeID u = frontier[i];
        //TODO: need to refactor to use user supplied filtering on the source node
        //if (src_filter(u)) { //hard code this into the library
        // Skip stale frontier entries: only process u if its priority still
        // belongs to the bucket range currently being drained.
        if (pq->priorities_[u] >= pq->delta_*pq->get_current_priority()){
          for (WNode wn : g.out_neigh(u)) {
            edge_apply(local_bins, u, wn.v, wn.w);
          }
        }
        //end of if statement
      }//going through current frontier for end

      //searching for the next priority: each thread votes its smallest
      //non-empty local bin into the shared next_bin_index (critical = min-reduce).
      for (size_t i=pq->get_current_priority(); i < local_bins.size(); i++) {
        if (!local_bins[i].empty()) {
          #pragma omp critical
          next_bin_index = min(next_bin_index, i);
          break;
        }
      }
      #pragma omp barrier

      #pragma omp single nowait
      {
        //t.Stop();
        //PrintStep(curr_bin_index, t.Millisecs(), curr_frontier_tail);
        // t.Start();
        curr_bin_index = kMaxBin;
        curr_frontier_tail = 0;
        // need to make sure we increment it from only one thread
        pq->increment_iter();
      }

      // Each thread appends its winning bin's contents to the shared frontier;
      // fetch_and_add reserves a disjoint slice so copies don't overlap.
      if (next_bin_index < local_bins.size()) {
        size_t copy_start = fetch_and_add(next_frontier_tail, local_bins[next_bin_index].size());
        copy(local_bins[next_bin_index].begin(), local_bins[next_bin_index].end(), frontier.data() + copy_start);
        local_bins[next_bin_index].resize(0);
      }
      iter++;
      #pragma omp barrier
    }
    #pragma omp single
    cout << "order processing took " << iter << " iterations" << endl;
  }//end of pragma omp parallel
}

// Same ordered traversal, but WITH intra-round merging: after draining the
// shared frontier each thread keeps processing its own current bin in place,
// as long as the bin stays at or below bin_size_threshold entries, before the
// global next-bin election — fewer synchronization rounds on light bins.
template<class Priority, class WhileCond, class EdgeApplyFunc >
void OrderedProcessingOperatorWithMerge(EagerPriorityQueue<Priority>* pq, const WGraph &g, WhileCond while_cond, EdgeApplyFunc edge_apply, int bin_size_threshold = 1000, NodeID optional_source_node=-1){
  pvector<NodeID> frontier(g.num_edges_directed());
  // two element arrays for double buffering curr=iter&1, next=(iter+1)&1
  //size_t shared_indexes[2] = {0, kMaxBin};
  //size_t frontier_tails[2] = {1, 0};
  pq->init_indexes_tails();
  //optional source node
  frontier[0] = optional_source_node;

  #pragma omp parallel
  {
    vector<vector<NodeID> > local_bins(0);  // per-thread buckets, grown on demand
    size_t iter = 0;
    while (while_cond()) {
      //TODO: refactor to use user supplied
      // while (user_supplied_condition())
      // size_t &curr_bin_index = shared_indexes[iter&1];
      // size_t &next_bin_index = shared_indexes[(iter+1)&1];
      // size_t &curr_frontier_tail = frontier_tails[iter&1];
      // size_t &next_frontier_tail = frontier_tails[(iter+1)&1];
      size_t &curr_bin_index = pq->shared_indexes[iter&1];
      size_t &next_bin_index = pq->shared_indexes[(iter+1)&1];
      size_t &curr_frontier_tail = pq->frontier_tails[iter&1];
      size_t &next_frontier_tail = pq->frontier_tails[(iter+1)&1];

      #pragma omp for nowait schedule(dynamic, 64)
      for (size_t i=0; i < curr_frontier_tail; i++) {
        NodeID u = frontier[i];
        //if (src_filter(u)) {
        // Skip stale frontier entries (priority no longer in current range).
        if (pq->priorities_[u] >= pq->delta_*pq->get_current_priority()){
          for (WNode wn : g.out_neigh(u)) {
            edge_apply(local_bins, u, wn.v, wn.w);
          }
        }
        //end of if statement
      }//going through current frontier for end

      // Merge phase: drain this thread's current bin locally while it stays
      // small; larger bins are deferred to the shared frontier instead.
      // NOTE(review): cur_bin_size (size_t) vs bin_size_threshold (int) is a
      // signed/unsigned comparison — safe while the threshold is non-negative.
      while (local_bins.size() > 0 && curr_bin_index < local_bins.size() && !local_bins[curr_bin_index].empty()){
        size_t cur_bin_size = local_bins[curr_bin_index].size();
        if (cur_bin_size > bin_size_threshold) break;
        // Copy-then-clear so edge_apply may push new work into the same bin.
        vector<NodeID> cur_bin_copy = local_bins[curr_bin_index];
        local_bins[curr_bin_index].resize(0);
        for (size_t i=0; i < cur_bin_size; i++) {
          NodeID u = cur_bin_copy[i];
          //if (src_filter(u)) {
          if (pq->priorities_[u] >= pq->delta_*pq->get_current_priority()){
            for (WNode wn : g.out_neigh(u)) {
              edge_apply(local_bins, u, wn.v, wn.w);
            }
          }
        }
      }

      //searching for the next priority: min-reduce each thread's smallest
      //non-empty bin into the shared next_bin_index.
      for (size_t i=pq->get_current_priority(); i < local_bins.size(); i++) {
        if (!local_bins[i].empty()) {
          #pragma omp critical
          next_bin_index = min(next_bin_index, i);
          break;
        }
      }
      #pragma omp barrier

      #pragma omp single nowait
      {
        //t.Stop();
        //PrintStep(curr_bin_index, t.Millisecs(), curr_frontier_tail);
        // t.Start();
        curr_bin_index = kMaxBin;
        curr_frontier_tail = 0;
        // need to make sure we increment it from only one thread
        pq->increment_iter();
      }

      // Append the winning bin to the shared frontier; fetch_and_add reserves
      // a disjoint slice per thread.
      if (next_bin_index < local_bins.size()) {
        size_t copy_start = fetch_and_add(next_frontier_tail, local_bins[next_bin_index].size());
        copy(local_bins[next_bin_index].begin(), local_bins[next_bin_index].end(), frontier.data() + copy_start);
        local_bins[next_bin_index].resize(0);
      }
      iter++;
      #pragma omp barrier
    }
    #pragma omp single
    cout << "took " << iter << " iterations" << endl;
  }//end of pragma omp parallel
}

#endif  // ORDERED_PROCESSING_H
IrvingKirkwood.h
#ifndef __IRVING_KIRKWOOD_H__
#define __IRVING_KIRKWOOD_H__

#include <pyglasstools/Calculator.h>
#include <omp.h>
#include "cgfunc/CoarseGrainFunction.h"

// Irving-Kirkwood calculator: evaluates registered coarse-grained fields
// (local and pairwise observables) on a set of grid points, using a
// coarse-graining function for the delta/bond weights.
class PYBIND11_EXPORT IrvingKirkwood : public Calculator
{
    public:
        IrvingKirkwood( std::shared_ptr< ParticleSystem > sysdata,
                        std::shared_ptr< PairPotential > potential,
                        std::shared_ptr< CoarseGrainFunction > cgfunc,
                        std::shared_ptr< MPI::Communicator > comm )
            : Calculator(sysdata,potential), m_cgfunc(cgfunc), m_comm(comm)
        {
        };
        ~IrvingKirkwood(){};

        // Evaluate all registered observables on the given grid points
        // (definition below, outside the class).
        void compute(const std::vector< Eigen::Vector3d >& gridpoints);

        // Register a coarse-grained field, keyed by its name.
        virtual void addObservable(const std::shared_ptr<CoarseGrainedField>& obs)
        {
            m_observables.insert(std::pair<std::string, std::shared_ptr<CoarseGrainedField> >(obs->name, obs));
        }

        // Debug helper: print each particle's displacement twice, once via the
        // iterator and once via id-based lookup (to cross-check indexing).
        virtual void printDisplacement()
        {
            for( auto p_i = m_sysdata->particles.begin(); p_i != m_sysdata->particles.end(); ++p_i)
            {
                int id = abr::get<abr::id>(*p_i);
                py::print(abr::get<displacement>(*p_i)[0],abr::get<displacement>(*p_i)[1],id);
                py::print("WHY",abr::get<displacement>(m_sysdata->particles[id])[0],abr::get<displacement>(m_sysdata->particles[id])[1],abr::get<abr::id>(*p_i));
            }
        }

        // Reset every observable's accumulator for one grid point.
        virtual void clearState(unsigned int grid_id)
        {
            for (auto it=m_observables.begin(); it!=m_observables.end(); ++it)
                it->second->clear(grid_id);
        }

        // Accumulate single-particle (local) observables at one grid point,
        // weighted by the coarse-graining delta value cgval.
        virtual void computeLocalObsPerGrid(const AboriaParticles::value_type& particle_i, double cgval, unsigned int grid_id)
        {
            for (auto it=m_observables.begin(); it!=m_observables.end(); ++it)
            {
                if (it->second->islocal)
                    it->second->accumulate(particle_i,cgval,grid_id);
                else
                    continue;
            }
        }

        // Accumulate pairwise (non-local) observables for the (i,j) pair at one
        // grid point, weighted by the coarse-graining bond value bondval.
        virtual void computePairObsPerGrid( const AboriaParticles::value_type& particle_i,
                                            const AboriaParticles::value_type& particle_j,
                                            Eigen::Vector3d rij, double bondval,
                                            unsigned int grid_id)
        {
            for (auto it=m_observables.begin(); it!=m_observables.end(); ++it)
            {
                if (!it->second->islocal)
                    it->second->accumulate(particle_i,particle_j, rij, m_potential, bondval, grid_id);
                else
                    continue;
            }
        }

    private:
        std::shared_ptr< CoarseGrainFunction > m_cgfunc; //!< coarse-graining (delta/bond) function used for the weights
        std::shared_ptr< MPI::Communicator > m_comm;     //!< MPI communicator
        std::map< std::string, std::shared_ptr< CoarseGrainedField > > m_observables; //!< registered fields, keyed by name
};

// Evaluate every registered observable on each grid point: for each particle
// within the coarse-graining cutoff of the grid point, accumulate the local
// observables, then loop its neighbors for the pairwise (virial) terms.
// NOTE(review): grid points are processed in parallel; this is safe only if
// accumulate()/clear() on different grid_id values never touch shared state —
// confirm in CoarseGrainedField.
void IrvingKirkwood::compute(const std::vector< Eigen::Vector3d >& gridpoints)
{
    #pragma omp parallel for
    for (unsigned int i = 0; i < gridpoints.size(); ++i)
    {
        clearState(i);
        // Aboria range query around the grid point; `p_i != false` is the
        // Aboria idiom for "iterator not exhausted".
        for( auto p_i = abr::euclidean_search(m_sysdata->particles.get_query(),
                                              abr::vdouble3(gridpoints[i][0],gridpoints[i][1],gridpoints[i][2]),
                                              m_cgfunc->getRcut()); p_i != false; ++p_i)
        {
            //Set grid point X and position of particle ri
            // p_i.dx() is the (minimum-image) vector from the particle to the
            // query point, so ri = x - dx.
            Eigen::Vector3d dr(p_i.dx()[0], p_i.dx()[1],p_i.dx()[2]);
            Eigen::Vector3d x = gridpoints[i];
            Eigen::Vector3d ri = gridpoints[i]-dr;

            double cgval = m_cgfunc->getDeltaFunc(x,ri);
            computeLocalObsPerGrid(*p_i, cgval, i);
            //Compute a list of local observables

            //Next we loop through the j-th particles for the virial stress
            // NOTE(review): max_rcut is presumably an inherited member of
            // Calculator (maximum pair-potential cutoff) — not declared here.
            for( auto p_j = abr::euclidean_search(m_sysdata->particles.get_query(),
                                                  abr::get<position>(*p_i), max_rcut); p_j != false; ++p_j)
            {
                //Make sure the particle is unique
                if (abr::get<abr::id>(*p_i) != abr::get<abr::id>(*p_j))
                {
                    //set the distance between particle i and particle j
                    Eigen::Vector3d rij(-p_j.dx()[0], -p_j.dx()[1], -p_j.dx()[2]);
                    double bondval = m_cgfunc->getBondFunc(x,ri,rij);
                    //Don't forget to set diameters of the potential
                    computePairObsPerGrid(*p_i, *p_j, rij, bondval, i);
                }
            }
        }
    }
};

// pybind11 bindings for the IrvingKirkwood calculator.
void export_IrvingKirkwood(py::module& m)
{
    py::class_<IrvingKirkwood, Calculator, std::shared_ptr<IrvingKirkwood> >(m,"IrvingKirkwood")
    .def(py::init< std::shared_ptr< ParticleSystem >, std::shared_ptr< PairPotential >, std::shared_ptr< CoarseGrainFunction >, std::shared_ptr< MPI::Communicator > >())
    .def("compute", &IrvingKirkwood::compute)
    .def("setSystemData", &IrvingKirkwood::setSystemData)
    .def("addObservable", &IrvingKirkwood::addObservable)
    .def("printDisplacement", &IrvingKirkwood::printDisplacement)
    ;
};

#endif
9892.c
/* 2-D FDTD time-stepping kernel (PolyBench fdtd-2d), loop-tiled 64 (rows) x
 * 128 (columns) for cache locality, with each blocked stencil sweep made
 * thread-parallel via OpenMP. Interface matches the CHILL-generated original:
 * fixed 1000x1200 array extents; tmax/nx/ny select the active region.
 *   ex, ey : electric field components      hz : magnetic field
 *   _fict_ : source waveform applied to row 0 of ey each time step
 */
void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[1000 + 0][1200 + 0], double ey[1000 + 0][1200 + 0], double hz[1000 + 0][1200 + 0], double _fict_[500 + 0])
{
  for (int t = 0; t < tmax; t++) {
    /* Drive the boundary row with the source term for this time step. */
    for (int j = 0; j < ny; j++)
      ey[0][j] = _fict_[t];

    /* ey update: vertical difference of hz, rows 1..nx-1, all columns. */
#pragma omp parallel for
    for (int ib = 1; ib < nx; ib += 64) {
      const int ihi = (ib + 63 < nx - 1) ? ib + 63 : nx - 1;  /* tile upper bound */
      for (int i = ib; i <= ihi; i++)
        for (int jb = 0; jb < ny; jb += 128) {
          const int jhi = (ny - 1 < jb + 127) ? ny - 1 : jb + 127;
          for (int j = jb; j <= jhi; j++)
            ey[i][j] -= 0.5 * (hz[i][j] - hz[i - 1][j]);
        }
    }

    /* ex update: horizontal difference of hz, all rows, columns 1..ny-1. */
#pragma omp parallel for
    for (int ib = 0; ib < nx; ib += 64) {
      const int ihi = (ib + 63 < nx - 1) ? ib + 63 : nx - 1;
      for (int i = ib; i <= ihi; i++)
        for (int jb = 1; jb < ny; jb += 128) {
          const int jhi = (ny - 1 < jb + 127) ? ny - 1 : jb + 127;
          for (int j = jb; j <= jhi; j++)
            ex[i][j] -= 0.5 * (hz[i][j] - hz[i][j - 1]);
        }
    }

    /* hz update: discrete curl of (ex, ey) over the interior 0..nx-2, 0..ny-2. */
#pragma omp parallel for
    for (int ib = 0; ib < nx - 1; ib += 64) {
      const int ihi = (ib + 63 < nx - 2) ? ib + 63 : nx - 2;
      for (int i = ib; i <= ihi; i++)
        for (int jb = 0; jb < ny - 1; jb += 128) {
          const int jhi = (ny - 2 < jb + 127) ? ny - 2 : jb + 127;
          for (int j = jb; j <= jhi; j++)
            hz[i][j] -= 0.69999999999999996 * (ex[i][j + 1] - ex[i][j] + ey[i + 1][j] - ey[i][j]);
        }
    }
  }
}
convolutiondepthwise_5x5.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convdw5x5s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); const float bias0 = bias ? 
bias[g] : 0.f; const float* kernel0 = kernel + g * 25; float* outptr = out; float* outptr2 = outptr + outw; const float* img0 = bottom_blob.channel(g); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; const float* r3 = img0 + w * 3; const float* r4 = img0 + w * 4; const float* r5 = img0 + w * 5; const float* k0 = kernel0; const float* k1 = kernel0 + 5; const float* k2 = kernel0 + 10; const float* k3 = kernel0 + 15; const float* k4 = kernel0 + 20; #if __ARM_NEON float32x4_t _k0123 = vld1q_f32(kernel0); float32x4_t _k4567 = vld1q_f32(kernel0 + 4); float32x4_t _k891011 = vld1q_f32(kernel0 + 8); float32x4_t _k12131415 = vld1q_f32(kernel0 + 12); float32x4_t _k16171819 = vld1q_f32(kernel0 + 16); float32x4_t _k20212223 = vld1q_f32(kernel0 + 20); float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]); float32x4_t _bias0 = vdupq_n_f32(bias0); #endif // __ARM_NEON int i = 0; for (; i + 1 < outh; i += 2) { #if __ARM_NEON #if __aarch64__ int nn = outw >> 3; int remain = outw & 7; #else int nn = outw >> 2; int remain = outw & 3; #endif // __aarch64__ #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( // r1 "prfm pldl1keep, [%4, #384] \n" "ld1 {v16.4s, v17.4s, v18.4s}, [%4] \n" // v16 v17 v18 = r10 r14 r18 "mov v8.16b, %25.16b \n" // v8 = _bias0 "mov v9.16b, %25.16b \n" // v9 = _bias0 "0: \n" "mov v10.16b, %25.16b \n" // v10 = _bias0 "mov v11.16b, %25.16b \n" // v11 = _bias0 "fmla v8.4s, v16.4s, %19.s[1] \n" "fmla v10.4s, v16.4s, %18.s[0] \n" "ext v19.16b, v16.16b, v17.16b, #4 \n" // r11 "fmla v9.4s, v17.4s, %19.s[1] \n" "fmla v11.4s, v17.4s, %18.s[0] \n" "ext v20.16b, v17.16b, v18.16b, #4 \n" // r15 "fmla v8.4s, v17.4s, %20.s[1] \n" "fmla v10.4s, v17.4s, %19.s[0] \n" "ext v21.16b, v16.16b, v17.16b, #8 \n" // r12 "fmla v9.4s, v18.4s, %20.s[1] \n" "fmla v11.4s, v18.4s, %19.s[0] \n" "ext v22.16b, v17.16b, v18.16b, #8 \n" // r16 "fmla v8.4s, v19.4s, %19.s[2] \n" "fmla v10.4s, v19.4s, %18.s[1] \n" 
"ext v19.16b, v16.16b, v17.16b, #12 \n" // r13 "fmla v9.4s, v20.4s, %19.s[2] \n" "fmla v11.4s, v20.4s, %18.s[1] \n" "ext v20.16b, v17.16b, v18.16b, #12 \n" // r17 "fmla v8.4s, v21.4s, %19.s[3] \n" "fmla v10.4s, v21.4s, %18.s[2] \n" "add %4, %4, #32 \n" "fmla v9.4s, v22.4s, %19.s[3] \n" "fmla v11.4s, v22.4s, %18.s[2] \n" // r2 "prfm pldl1keep, [%5, #384] \n" "ld1 {v12.4s, v13.4s, v14.4s}, [%5] \n" // v12 v13 v14 = r20 r24 r28 "fmla v8.4s, v19.4s, %20.s[0] \n" "fmla v10.4s, v19.4s, %18.s[3] \n" "fmla v9.4s, v20.4s, %20.s[0] \n" "fmla v11.4s, v20.4s, %18.s[3] \n" "add %5, %5, #32 \n" "fmla v8.4s, v12.4s, %20.s[2] \n" "fmla v10.4s, v12.4s, %19.s[1] \n" "ext v21.16b, v12.16b, v13.16b, #4 \n" // r21 "fmla v9.4s, v13.4s, %20.s[2] \n" "fmla v11.4s, v13.4s, %19.s[1] \n" "ext v22.16b, v13.16b, v14.16b, #4 \n" // r25 "fmla v8.4s, v13.4s, %21.s[2] \n" "fmla v10.4s, v13.4s, %20.s[1] \n" "ext v19.16b, v12.16b, v13.16b, #8 \n" // r22 "fmla v9.4s, v14.4s, %21.s[2] \n" "fmla v11.4s, v14.4s, %20.s[1] \n" "ext v20.16b, v13.16b, v14.16b, #8 \n" // r26 "fmla v8.4s, v21.4s, %20.s[3] \n" "fmla v10.4s, v21.4s, %19.s[2] \n" "ext v21.16b, v12.16b, v13.16b, #12 \n" // r23 "fmla v9.4s, v22.4s, %20.s[3] \n" "fmla v11.4s, v22.4s, %19.s[2] \n" "ext v22.16b, v13.16b, v14.16b, #12 \n" // r27 "fmla v8.4s, v19.4s, %21.s[0] \n" "fmla v10.4s, v19.4s, %19.s[3] \n" "fmla v9.4s, v20.4s, %21.s[0] \n" "fmla v11.4s, v20.4s, %19.s[3] \n" // r3 "prfm pldl1keep, [%6, #384] \n" "ld1 {v16.4s, v17.4s, v18.4s}, [%6] \n" // v16 v17 v18 = r30 r34 r38 "fmla v8.4s, v21.4s, %21.s[1] \n" "fmla v10.4s, v21.4s, %20.s[0] \n" "fmla v9.4s, v22.4s, %21.s[1] \n" "fmla v11.4s, v22.4s, %20.s[0] \n" "add %6, %6, #32 \n" "fmla v8.4s, v16.4s, %21.s[3] \n" "fmla v10.4s, v16.4s, %20.s[2] \n" "ext v19.16b, v16.16b, v17.16b, #4 \n" // r31 "fmla v9.4s, v17.4s, %21.s[3] \n" "fmla v11.4s, v17.4s, %20.s[2] \n" "ext v20.16b, v17.16b, v18.16b, #4 \n" // r35 "fmla v8.4s, v17.4s, %22.s[3] \n" "fmla v10.4s, v17.4s, %21.s[2] \n" "ext v21.16b, 
v16.16b, v17.16b, #8 \n" // r32 "fmla v9.4s, v18.4s, %22.s[3] \n" "fmla v11.4s, v18.4s, %21.s[2] \n" "ext v22.16b, v17.16b, v18.16b, #8 \n" // r36 "fmla v8.4s, v19.4s, %22.s[0] \n" "fmla v10.4s, v19.4s, %20.s[3] \n" "ext v19.16b, v16.16b, v17.16b, #12 \n" // r33 "fmla v9.4s, v20.4s, %22.s[0] \n" "fmla v11.4s, v20.4s, %20.s[3] \n" "ext v20.16b, v17.16b, v18.16b, #12 \n" // r37 "fmla v8.4s, v21.4s, %22.s[1] \n" "fmla v10.4s, v21.4s, %21.s[0] \n" "fmla v9.4s, v22.4s, %22.s[1] \n" "fmla v11.4s, v22.4s, %21.s[0] \n" // r4 "prfm pldl1keep, [%7, #384] \n" "ld1 {v12.4s, v13.4s, v14.4s}, [%7] \n" // v12 v13 v14 = r40 r44 r48 "fmla v8.4s, v19.4s, %22.s[2] \n" "fmla v10.4s, v19.4s, %21.s[1] \n" "add %7, %7, #32 \n" "fmla v9.4s, v20.4s, %22.s[2] \n" "fmla v11.4s, v20.4s, %21.s[1] \n" "ext v21.16b, v12.16b, v13.16b, #4 \n" // r41 "fmla v8.4s, v12.4s, %23.s[0] \n" "fmla v10.4s, v12.4s, %21.s[3] \n" "ext v22.16b, v13.16b, v14.16b, #4 \n" // r45 "fmla v9.4s, v13.4s, %23.s[0] \n" "fmla v11.4s, v13.4s, %21.s[3] \n" "ext v19.16b, v12.16b, v13.16b, #8 \n" // r42 "fmla v8.4s, v13.4s, %24.s[0] \n" "fmla v10.4s, v13.4s, %22.s[3] \n" "ext v20.16b, v13.16b, v14.16b, #8 \n" // r46 "fmla v9.4s, v14.4s, %24.s[0] \n" "fmla v11.4s, v14.4s, %22.s[3] \n" // r0 and r5 "prfm pldl1keep, [%3, #384] \n" "ld1 {v16.4s, v17.4s, v18.4s}, [%3] \n" // v16 v17 v18 = r00 r04 r08 "fmla v8.4s, v21.4s, %23.s[1] \n" "fmla v10.4s, v21.4s, %22.s[0] \n" "ext v21.16b, v12.16b, v13.16b, #12 \n" // r43 "fmla v9.4s, v22.4s, %23.s[1] \n" "fmla v11.4s, v22.4s, %22.s[0] \n" "ext v22.16b, v13.16b, v14.16b, #12 \n" // r47 "fmla v8.4s, v19.4s, %23.s[2] \n" "fmla v10.4s, v19.4s, %22.s[1] \n" "prfm pldl1keep, [%8, #384] \n" "ld1 {v12.4s, v13.4s, v14.4s}, [%8] \n" // v12 v13 v14 = r50 r54 r58 "fmla v9.4s, v20.4s, %23.s[2] \n" "fmla v11.4s, v20.4s, %22.s[1] \n" "ext v19.16b, v16.16b, v17.16b, #4 \n" // r01 "fmla v8.4s, v21.4s, %23.s[3] \n" "fmla v10.4s, v21.4s, %22.s[2] \n" "ext v23.16b, v12.16b, v13.16b, #4 \n" // r51 "fmla 
v9.4s, v22.4s, %23.s[3] \n" "fmla v11.4s, v22.4s, %22.s[2] \n" "ext v20.16b, v17.16b, v18.16b, #4 \n" // r05 "fmla v8.4s, v16.4s, %18.s[0] \n" "fmla v10.4s, v12.4s, %23.s[0] \n" "ext v24.16b, v13.16b, v14.16b, #4 \n" // r55 "fmla v9.4s, v17.4s, %18.s[0] \n" "fmla v11.4s, v13.4s, %23.s[0] \n" "ext v21.16b, v16.16b, v17.16b, #8 \n" // r02 "fmla v8.4s, v17.4s, %19.s[0] \n" "fmla v10.4s, v13.4s, %24.s[0] \n" "ext v25.16b, v12.16b, v13.16b, #8 \n" // r52 "fmla v9.4s, v18.4s, %19.s[0] \n" "fmla v11.4s, v14.4s, %24.s[0] \n" "ext v22.16b, v17.16b, v18.16b, #8 \n" // r06 "fmla v8.4s, v19.4s, %18.s[1] \n" "fmla v10.4s, v23.4s, %23.s[1] \n" "ext v26.16b, v13.16b, v14.16b, #8 \n" // r56 "fmla v9.4s, v20.4s, %18.s[1] \n" "fmla v11.4s, v24.4s, %23.s[1] \n" "ext v19.16b, v16.16b, v17.16b, #12 \n" // r03 "fmla v8.4s, v21.4s, %18.s[2] \n" "fmla v10.4s, v25.4s, %23.s[2] \n" "ext v23.16b, v12.16b, v13.16b, #12 \n" // r53 "fmla v9.4s, v22.4s, %18.s[2] \n" "fmla v11.4s, v26.4s, %23.s[2] \n" "ext v20.16b, v17.16b, v18.16b, #12 \n" // r07 "fmla v8.4s, v19.4s, %18.s[3] \n" "fmla v10.4s, v23.4s, %23.s[3] \n" "ext v24.16b, v13.16b, v14.16b, #12 \n" // r57 "fmla v9.4s, v20.4s, %18.s[3] \n" "add %3, %3, #32 \n" "fmla v11.4s, v24.4s, %23.s[3] \n" "add %8, %8, #32 \n" // r1 "prfm pldl1keep, [%4, #384] \n" "ld1 {v16.4s, v17.4s, v18.4s}, [%4] \n" // v16 v17 v18 = r10 r14 r18 "subs %w0, %w0, #1 \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "mov v8.16b, %25.16b \n" // v8 = _bias0 "mov v9.16b, %25.16b \n" // v9 = _bias0 "st1 {v10.4s, v11.4s}, [%2], #32 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(outptr2), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2), // %5 "=r"(r3), // %6 "=r"(r4), // %7 "=r"(r5) // %8 : "0"(nn), "1"(outptr), "2"(outptr2), "3"(r0), "4"(r1), "5"(r2), "6"(r3), "7"(r4), "8"(r5), "w"(_k0123), // %18 "w"(_k4567), // %19 "w"(_k891011), // %20 "w"(_k12131415), // %21 "w"(_k16171819), // %22 "w"(_k20212223), // %23 "w"(_k24242424), // %24 "w"(_bias0) // %25 : "cc", "memory", "v8", 
"v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26"); } if (remain >= 4) { remain -= 4; asm volatile( // r1 "prfm pldl1keep, [%3, #256] \n" "ld1 {v12.4s, v13.4s}, [%3] \n" // v12 v13 = r10 r14 "mov v8.16b, %23.16b \n" // v8 = _bias0 "mov v9.16b, %23.16b \n" // v9 = _bias0 "fmul v10.4s, v12.4s, %17.s[1] \n" "fmul v11.4s, v12.4s, %16.s[0] \n" "ext v21.16b, v12.16b, v13.16b, #4 \n" // r11 "fmla v8.4s, v13.4s, %18.s[1] \n" "fmla v9.4s, v13.4s, %17.s[0] \n" "ext v22.16b, v12.16b, v13.16b, #8 \n" // r12 "fmla v10.4s, v21.4s, %17.s[2] \n" "fmla v11.4s, v21.4s, %16.s[1] \n" "ext v23.16b, v12.16b, v13.16b, #12 \n" // r13 "fmla v8.4s, v22.4s, %17.s[3] \n" "fmla v9.4s, v22.4s, %16.s[2] \n" // r2 "prfm pldl1keep, [%4, #256] \n" "ld1 {v16.4s, v17.4s}, [%4] \n" // v16 v17 = r20 r24 "fmla v10.4s, v23.4s, %18.s[0] \n" "fmla v11.4s, v23.4s, %16.s[3] \n" "add %4, %4, #16 \n" "fmla v8.4s, v16.4s, %18.s[2] \n" "fmla v9.4s, v16.4s, %17.s[1] \n" "ext v18.16b, v16.16b, v17.16b, #4 \n" // r21 "fmla v10.4s, v17.4s, %19.s[2] \n" "fmla v11.4s, v17.4s, %18.s[1] \n" "ext v19.16b, v16.16b, v17.16b, #8 \n" // r22 "fmla v8.4s, v18.4s, %18.s[3] \n" "fmla v9.4s, v18.4s, %17.s[2] \n" "ext v20.16b, v16.16b, v17.16b, #12 \n" // r23 "fmla v10.4s, v19.4s, %19.s[0] \n" "fmla v11.4s, v19.4s, %17.s[3] \n" // r3 "prfm pldl1keep, [%5, #256] \n" "ld1 {v12.4s, v13.4s}, [%5] \n" // v12 v13 = r30 r34 "fmla v8.4s, v20.4s, %19.s[1] \n" "fmla v9.4s, v20.4s, %18.s[0] \n" "add %5, %5, #16 \n" "fmla v10.4s, v12.4s, %19.s[3] \n" "fmla v11.4s, v12.4s, %18.s[2] \n" "ext v21.16b, v12.16b, v13.16b, #4 \n" // r31 "fmla v8.4s, v13.4s, %20.s[3] \n" "fmla v9.4s, v13.4s, %19.s[2] \n" "ext v22.16b, v12.16b, v13.16b, #8 \n" // r32 "fmla v10.4s, v21.4s, %20.s[0] \n" "fmla v11.4s, v21.4s, %18.s[3] \n" "ext v23.16b, v12.16b, v13.16b, #12 \n" // r33 "fmla v8.4s, v22.4s, %20.s[1] \n" "fmla v9.4s, v22.4s, %19.s[0] \n" // r4 "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4s, 
v17.4s}, [%6] \n" // v16 v17 = r40 r44 "fmla v10.4s, v23.4s, %20.s[2] \n" "fmla v11.4s, v23.4s, %19.s[1] \n" "add %6, %6, #16 \n" "fmla v8.4s, v16.4s, %21.s[0] \n" "fmla v9.4s, v16.4s, %19.s[3] \n" "ext v18.16b, v16.16b, v17.16b, #4 \n" // r41 "fmla v10.4s, v17.4s, %22.s[0] \n" "fmla v11.4s, v17.4s, %20.s[3] \n" "ext v19.16b, v16.16b, v17.16b, #8 \n" // r42 "fmla v8.4s, v18.4s, %21.s[1] \n" "fmla v9.4s, v18.4s, %20.s[0] \n" "ext v20.16b, v16.16b, v17.16b, #12 \n" // r43 "fmla v10.4s, v19.4s, %21.s[2] \n" "fmla v11.4s, v19.4s, %20.s[1] \n" // r0 "prfm pldl1keep, [%2, #256] \n" "ld1 {v16.4s, v17.4s}, [%2] \n" // v16 v17 = r00 r04 "fmla v8.4s, v20.4s, %21.s[3] \n" "fmla v9.4s, v20.4s, %20.s[2] \n" // r5 "prfm pldl1keep, [%7, #256] \n" "ld1 {v12.4s, v13.4s}, [%7] \n" // v12 v13 = r50 r54 "fmla v10.4s, v16.4s, %16.s[0] \n" "fmla v11.4s, v12.4s, %21.s[0] \n" "ext v18.16b, v16.16b, v17.16b, #4 \n" // r01 "fmla v8.4s, v17.4s, %17.s[0] \n" "ext v21.16b, v12.16b, v13.16b, #4 \n" // r51 "fmla v9.4s, v13.4s, %22.s[0] \n" "ext v19.16b, v16.16b, v17.16b, #8 \n" // r02 "fmla v10.4s, v18.4s, %16.s[1] \n" "ext v22.16b, v12.16b, v13.16b, #8 \n" // r52 "fmla v11.4s, v21.4s, %21.s[1] \n" "ext v20.16b, v16.16b, v17.16b, #12 \n" // r03 "fmla v8.4s, v19.4s, %16.s[2] \n" "ext v23.16b, v12.16b, v13.16b, #12 \n" // r53 "fmla v9.4s, v22.4s, %21.s[2] \n" "add %3, %3, #16 \n" "fmla v10.4s, v20.4s, %16.s[3] \n" "fmla v11.4s, v23.4s, %21.s[3] \n" "add %2, %2, #16 \n" "fadd v8.4s, v8.4s, v10.4s \n" "fadd v9.4s, v9.4s, v11.4s \n" "add %7, %7, #16 \n" "st1 {v8.4s}, [%0], #16 \n" "st1 {v9.4s}, [%1], #16 \n" : "=r"(outptr), // %0 "=r"(outptr2), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(r5) // %7 : "0"(outptr), "1"(outptr2), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(r5), "w"(_k0123), // %16 "w"(_k4567), // %17 "w"(_k891011), // %18 "w"(_k12131415), // %19 "w"(_k16171819), // %20 "w"(_k20212223), // %21 "w"(_k24242424), // %22 "w"(_bias0) // %23 : 
"cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } #else if (nn > 0) { asm volatile( // r1 "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4] \n" // q14 q15 = r10 r14 "vmov q8, %q25 \n" // q8 = _bias0 "0: \n" "vmov q9, %q25 \n" // q9 = _bias0 "vmla.f32 q8, q14, %e19[1] \n" "vmla.f32 q9, q14, %e18[0] \n" "vext.32 q12, q14, q15, #1 \n" // r11 "vmla.f32 q8, q15, %e20[1] \n" "vmla.f32 q9, q15, %e19[0] \n" "vext.32 q13, q14, q15, #2 \n" // r12 "vmla.f32 q8, q12, %f19[0] \n" "vmla.f32 q9, q12, %e18[1] \n" "vext.32 q12, q14, q15, #3 \n" // r13 "vmla.f32 q8, q13, %f19[1] \n" "vmla.f32 q9, q13, %f18[0] \n" // r2 "pld [%5, #256] \n" "vld1.f32 {d20-d23}, [%5] \n" // q10 q11 = r20 r24 "vmla.f32 q8, q12, %e20[0] \n" "vmla.f32 q9, q12, %f18[1] \n" "add %5, #16 \n" "vmla.f32 q8, q10, %f20[0] \n" "vmla.f32 q9, q10, %e19[1] \n" "vext.32 q12, q10, q11, #1 \n" // r21 "vmla.f32 q8, q11, %f21[0] \n" "vmla.f32 q9, q11, %e20[1] \n" "vext.32 q13, q10, q11, #2 \n" // r22 "vmla.f32 q8, q12, %f20[1] \n" "vmla.f32 q9, q12, %f19[0] \n" "vext.32 q12, q10, q11, #3 \n" // r23 "vmla.f32 q8, q13, %e21[0] \n" "vmla.f32 q9, q13, %f19[1] \n" // r3 "pld [%6, #256] \n" "vld1.f32 {d28-d31}, [%6] \n" // q14 q15 = r30 r34 "vmla.f32 q8, q12, %e21[1] \n" "vmla.f32 q9, q12, %e20[0] \n" "add %6, #16 \n" "vmla.f32 q8, q14, %f21[1] \n" "vmla.f32 q9, q14, %f20[0] \n" "vext.32 q12, q14, q15, #1 \n" // r31 "vmla.f32 q8, q15, %f22[1] \n" "vmla.f32 q9, q15, %f21[0] \n" "vext.32 q13, q14, q15, #2 \n" // r32 "vmla.f32 q8, q12, %e22[0] \n" "vmla.f32 q9, q12, %f20[1] \n" "vext.32 q12, q14, q15, #3 \n" // r33 "vmla.f32 q8, q13, %e22[1] \n" "vmla.f32 q9, q13, %e21[0] \n" // r4 "pld [%7, #256] \n" "vld1.f32 {d20-d23}, [%7] \n" // q10 q11 = r40 r44 "vmla.f32 q8, q12, %f22[0] \n" "vmla.f32 q9, q12, %e21[1] \n" "add %7, #16 \n" "vmla.f32 q8, q10, %e23[0] \n" "vmla.f32 q9, q10, %f21[1] \n" "vext.32 q12, q10, q11, #1 \n" // r41 "vmla.f32 q8, q11, %e24[0] \n" "vmla.f32 
q9, q11, %f22[1] \n" "vext.32 q13, q10, q11, #2 \n" // r42 "vmla.f32 q8, q12, %e23[1] \n" "vmla.f32 q9, q12, %e22[0] \n" "vext.32 q12, q10, q11, #3 \n" // r43 "vmla.f32 q8, q13, %f23[0] \n" "vmla.f32 q9, q13, %e22[1] \n" // r0 and r5 "pld [%3, #256] \n" "vld1.f32 {d20-d23}, [%3] \n" // q10 q11 = r00 r04 "vmla.f32 q8, q12, %f23[1] \n" "vmla.f32 q9, q12, %f22[0] \n" // r5 "pld [%8, #256] \n" "vld1.f32 {d28-d31}, [%8] \n" // q14 q15 = r50 r54 "vmla.f32 q8, q10, %e18[0] \n" "vmla.f32 q9, q14, %e23[0] \n" "vext.32 q12, q10, q11, #1 \n" // r01 "vmla.f32 q8, q11, %e19[0] \n" "vmla.f32 q9, q15, %e24[0] \n" "vext.32 q13, q14, q15, #1 \n" // r51 "vmla.f32 q8, q12, %e18[1] \n" "vext.32 q12, q10, q11, #2 \n" // r02 "vmla.f32 q9, q13, %e23[1] \n" "vext.32 q13, q14, q15, #2 \n" // r52 "vmla.f32 q8, q12, %f18[0] \n" "vext.32 q12, q10, q11, #3 \n" // r03 "vmla.f32 q9, q13, %f23[0] \n" "vext.32 q13, q14, q15, #3 \n" // r33 "vmla.f32 q8, q12, %f18[1] \n" "add %3, #16 \n" "vmla.f32 q9, q13, %f23[1] \n" "add %4, #16 \n" // r1 "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4] \n" // q14 q15 = r10 r14 "add %8, #16 \n" "vst1.f32 {d16-d17}, [%1]! \n" "vmov q8, %q25 \n" // q8 = _bias0 "subs %0, #1 \n" "vst1.f32 {d18-d19}, [%2]! 
\n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(outptr2), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2), // %5 "=r"(r3), // %6 "=r"(r4), // %7 "=r"(r5) // %8 : "0"(nn), "1"(outptr), "2"(outptr2), "3"(r0), "4"(r1), "5"(r2), "6"(r3), "7"(r4), "8"(r5), "w"(_k0123), // %18 "w"(_k4567), // %19 "w"(_k891011), // %20 "w"(_k12131415), // %21 "w"(_k16171819), // %22 "w"(_k20212223), // %23 "w"(_k24242424), // %24 "w"(_bias0) // %25 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { float sum = bias0; float sum2 = bias0; #if __ARM_NEON // TODO neon assembly optimize float32x4_t _r1 = vld1q_f32(r1); float32x4_t _k1 = vld1q_f32(k1); float32x4_t _sum = vmulq_f32(_r1, _k1); float32x4_t _sum2 = vmulq_f32(_r1, _k0123); float32x4_t _r2 = vld1q_f32(r2); float32x4_t _k2 = vld1q_f32(k2); _sum = vmlaq_f32(_sum, _r2, _k2); _sum2 = vmlaq_f32(_sum2, _r2, _k1); float32x4_t _r3 = vld1q_f32(r3); float32x4_t _k3 = vld1q_f32(k3); _sum = vmlaq_f32(_sum, _r3, _k3); _sum2 = vmlaq_f32(_sum2, _r3, _k2); float32x4_t _r4 = vld1q_f32(r4); _sum = vmlaq_f32(_sum, _r4, _k20212223); _sum2 = vmlaq_f32(_sum2, _r4, _k3); float32x4_t _r0 = vld1q_f32(r0); _sum = vmlaq_f32(_sum, _r0, _k0123); float32x4_t _r5 = vld1q_f32(r5); _sum2 = vmlaq_f32(_sum2, _r5, _k20212223); float32x4_t _k_t4; _k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0); _k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1); _k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2); _k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3); float32x4_t _r_t4; _r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0); _r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1); _r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2); _r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3); _sum = vmlaq_f32(_sum, _r_t4, _k_t4); sum += r4[4] * k4[4]; _r_t4 = vextq_f32(_r_t4, _r_t4, 1); _r_t4 = vsetq_lane_f32(r4[4], _r_t4, 3); _sum2 = vmlaq_f32(_sum2, _r_t4, _k_t4); sum2 += r5[4] * k4[4]; float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); float32x2_t 
_ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2)); float32x2_t _ss_ss2 = vpadd_f32(_ss, _ss2); sum += vget_lane_f32(_ss_ss2, 0); sum2 += vget_lane_f32(_ss_ss2, 1); #else sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; sum2 += r1[0] * k0[0]; sum2 += r1[1] * k0[1]; sum2 += r1[2] * k0[2]; sum2 += r1[3] * k0[3]; sum2 += r1[4] * k0[4]; sum2 += r2[0] * k1[0]; sum2 += r2[1] * k1[1]; sum2 += r2[2] * k1[2]; sum2 += r2[3] * k1[3]; sum2 += r2[4] * k1[4]; sum2 += r3[0] * k2[0]; sum2 += r3[1] * k2[1]; sum2 += r3[2] * k2[2]; sum2 += r3[3] * k2[3]; sum2 += r3[4] * k2[4]; sum2 += r4[0] * k3[0]; sum2 += r4[1] * k3[1]; sum2 += r4[2] * k3[2]; sum2 += r4[3] * k3[3]; sum2 += r4[4] * k3[4]; sum2 += r5[0] * k4[0]; sum2 += r5[1] * k4[1]; sum2 += r5[2] * k4[2]; sum2 += r5[3] * k4[3]; sum2 += r5[4] * k4[4]; #endif // __ARM_NEON *outptr = sum; *outptr2 = sum2; r0++; r1++; r2++; r3++; r4++; r5++; outptr++; outptr2++; } r0 += 4 + w; r1 += 4 + w; r2 += 4 + w; r3 += 4 + w; r4 += 4 + w; r5 += 4 + w; outptr += outw; outptr2 += outw; } for (; i < outh; i++) { #if __ARM_NEON #if __aarch64__ int nn = outw >> 3; int remain = outw & 7; #else int nn = outw >> 2; int remain = outw & 3; #endif // __aarch64__ #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( // v10 v11 // r0 "prfm pldl1keep, [%2, #384] \n" "ld1 {v16.4s, v17.4s, v18.4s}, [%2] \n" // v16 v17 v18 = r00 r04 r08 "mov v8.16b, %21.16b \n" // v8 = _bias0 "mov v9.16b, %21.16b \n" // v9 = 
_bias0 "0: \n" "fmul v10.4s, v16.4s, %14.s[0] \n" "ext v19.16b, v16.16b, v17.16b, #4 \n" // r01 "fmul v11.4s, v17.4s, %14.s[0] \n" "ext v20.16b, v17.16b, v18.16b, #4 \n" // r05 "fmla v8.4s, v17.4s, %15.s[0] \n" "ext v21.16b, v16.16b, v17.16b, #8 \n" // r02 "fmla v9.4s, v18.4s, %15.s[0] \n" "ext v22.16b, v17.16b, v18.16b, #8 \n" // r06 "fmla v10.4s, v19.4s, %14.s[1] \n" "ext v19.16b, v16.16b, v17.16b, #12 \n" // r03 "fmla v11.4s, v20.4s, %14.s[1] \n" "ext v20.16b, v17.16b, v18.16b, #12 \n" // r07 "fmla v8.4s, v21.4s, %14.s[2] \n" "fmla v9.4s, v22.4s, %14.s[2] \n" // r1 "prfm pldl1keep, [%3, #384] \n" "ld1 {v12.4s, v13.4s, v14.4s}, [%3] \n" // v12 v13 v14 = r10 r14 r18 "fmla v10.4s, v19.4s, %14.s[3] \n" "fmla v11.4s, v20.4s, %14.s[3] \n" "fmla v8.4s, v12.4s, %15.s[1] \n" "ext v19.16b, v12.16b, v13.16b, #4 \n" // r11 "fmla v9.4s, v13.4s, %15.s[1] \n" "ext v20.16b, v13.16b, v14.16b, #4 \n" // r15 "fmla v10.4s, v13.4s, %16.s[1] \n" "ext v21.16b, v12.16b, v13.16b, #8 \n" // r12 "fmla v11.4s, v14.4s, %16.s[1] \n" "ext v22.16b, v13.16b, v14.16b, #8 \n" // r16 "fmla v8.4s, v19.4s, %15.s[2] \n" "ext v19.16b, v12.16b, v13.16b, #12 \n" // r13 "fmla v9.4s, v20.4s, %15.s[2] \n" "ext v20.16b, v13.16b, v14.16b, #12 \n" // r17 "fmla v10.4s, v21.4s, %15.s[3] \n" "fmla v11.4s, v22.4s, %15.s[3] \n" // r2 "prfm pldl1keep, [%4, #384] \n" "ld1 {v16.4s, v17.4s, v18.4s}, [%4] \n" // v16 v17 v18 = r20 r24 r28 "fmla v8.4s, v19.4s, %16.s[0] \n" "fmla v9.4s, v20.4s, %16.s[0] \n" "fmla v10.4s, v16.4s, %16.s[2] \n" "ext v19.16b, v16.16b, v17.16b, #4 \n" // r21 "fmla v11.4s, v17.4s, %16.s[2] \n" "ext v20.16b, v17.16b, v18.16b, #4 \n" // r25 "fmla v8.4s, v17.4s, %17.s[2] \n" "ext v21.16b, v16.16b, v17.16b, #8 \n" // r22 "fmla v9.4s, v18.4s, %17.s[2] \n" "ext v22.16b, v17.16b, v18.16b, #8 \n" // r26 "fmla v10.4s, v19.4s, %16.s[3] \n" "ext v19.16b, v16.16b, v17.16b, #12 \n" // r23 "fmla v11.4s, v20.4s, %16.s[3] \n" "ext v20.16b, v17.16b, v18.16b, #12 \n" // r27 "fmla v8.4s, v21.4s, %17.s[0] \n" 
"fmla v9.4s, v22.4s, %17.s[0] \n" // r3 "prfm pldl1keep, [%5, #384] \n" "ld1 {v12.4s, v13.4s, v14.4s}, [%5] \n" // v12 v13 v14 = r30 r34 r38 "fmla v10.4s, v19.4s, %17.s[1] \n" "fmla v11.4s, v20.4s, %17.s[1] \n" "fmla v8.4s, v12.4s, %17.s[3] \n" "ext v19.16b, v12.16b, v13.16b, #4 \n" // r11 "fmla v9.4s, v13.4s, %17.s[3] \n" "ext v20.16b, v13.16b, v14.16b, #4 \n" // r15 "fmla v10.4s, v13.4s, %18.s[3] \n" "ext v21.16b, v12.16b, v13.16b, #8 \n" // r12 "fmla v11.4s, v14.4s, %18.s[3] \n" "ext v22.16b, v13.16b, v14.16b, #8 \n" // r16 "fmla v8.4s, v19.4s, %18.s[0] \n" "ext v19.16b, v12.16b, v13.16b, #12 \n" // r13 "fmla v9.4s, v20.4s, %18.s[0] \n" "ext v20.16b, v13.16b, v14.16b, #12 \n" // r17 "fmla v10.4s, v21.4s, %18.s[1] \n" "fmla v11.4s, v22.4s, %18.s[1] \n" // r4 "prfm pldl1keep, [%6, #384] \n" "ld1 {v16.4s, v17.4s, v18.4s}, [%6] \n" // v16 v17 v18 = r40 r44 r48 "fmla v8.4s, v19.4s, %18.s[2] \n" "fmla v9.4s, v20.4s, %18.s[2] \n" "fmla v10.4s, v16.4s, %19.s[0] \n" "ext v19.16b, v16.16b, v17.16b, #4 \n" // r41 "fmla v11.4s, v17.4s, %19.s[0] \n" "ext v20.16b, v17.16b, v18.16b, #4 \n" // r45 "fmla v8.4s, v17.4s, %20.s[0] \n" "ext v21.16b, v16.16b, v17.16b, #8 \n" // r42 "fmla v9.4s, v18.4s, %20.s[0] \n" "ext v22.16b, v17.16b, v18.16b, #8 \n" // r46 "fmla v10.4s, v19.4s, %19.s[1] \n" "ext v19.16b, v16.16b, v17.16b, #12 \n" // r43 "fmla v11.4s, v20.4s, %19.s[1] \n" "ext v20.16b, v17.16b, v18.16b, #12 \n" // r47 "fmla v8.4s, v21.4s, %19.s[2] \n" "add %2, %2, #32 \n" "fmla v9.4s, v22.4s, %19.s[2] \n" "add %3, %3, #32 \n" "fmla v10.4s, v19.4s, %19.s[3] \n" "add %4, %4, #32 \n" "fmla v11.4s, v20.4s, %19.s[3] \n" // r0 "prfm pldl1keep, [%2, #384] \n" "ld1 {v16.4s, v17.4s, v18.4s}, [%2] \n" // v16 v17 v18 = r00 r04 r08 "add %5, %5, #32 \n" "fadd v10.4s, v8.4s, v10.4s \n" "add %6, %6, #32 \n" "fadd v11.4s, v9.4s, v11.4s \n" "mov v8.16b, %21.16b \n" // v8 = _bias0 "mov v9.16b, %21.16b \n" // v9 = _bias0 "subs %w0, %w0, #1 \n" "st1 {v10.4s, v11.4s}, [%1], #32 \n" "bne 0b \n" : 
"=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4) // %6 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "w"(_k0123), // %14 "w"(_k4567), // %15 "w"(_k891011), // %16 "w"(_k12131415), // %17 "w"(_k16171819), // %18 "w"(_k20212223), // %19 "w"(_k24242424), // %20 "w"(_bias0) // %21 : "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22"); } if (remain >= 4) { remain -= 4; asm volatile( // r0 "prfm pldl1keep, [%1, #256] \n" "ld1 {v16.4s, v17.4s}, [%1] \n" // v16 v17 = r00 r04 "mov v8.16b, %19.16b \n" // v8 = _bias0 "add %1, %1, #16 \n" "fmul v9.4s, v16.4s, %12.s[0] \n" "ext v18.16b, v16.16b, v17.16b, #4 \n" // r01 "fmla v8.4s, v17.4s, %13.s[0] \n" "ext v19.16b, v16.16b, v17.16b, #8 \n" // r02 "fmla v9.4s, v18.4s, %12.s[1] \n" "ext v20.16b, v16.16b, v17.16b, #12 \n" // r03 "fmla v8.4s, v19.4s, %12.s[2] \n" // r1 "prfm pldl1keep, [%2, #256] \n" "ld1 {v10.4s, v11.4s}, [%2] \n" // v10 v11 = r10 r14 "fmla v9.4s, v20.4s, %12.s[3] \n" "add %2, %2, #16 \n" "fmla v8.4s, v10.4s, %13.s[1] \n" "ext v12.16b, v10.16b, v11.16b, #4 \n" // r11 "fmla v9.4s, v11.4s, %14.s[1] \n" "ext v13.16b, v10.16b, v11.16b, #8 \n" // r12 "fmla v8.4s, v12.4s, %13.s[2] \n" "ext v14.16b, v10.16b, v11.16b, #12 \n" // r13 "fmla v9.4s, v13.4s, %13.s[3] \n" // r2 "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4s, v17.4s}, [%3] \n" // v16 v17 = r20 r24 "fmla v8.4s, v14.4s, %14.s[0] \n" "add %3, %3, #16 \n" "fmla v9.4s, v16.4s, %14.s[2] \n" "ext v18.16b, v16.16b, v17.16b, #4 \n" // r21 "fmla v8.4s, v17.4s, %15.s[2] \n" "ext v19.16b, v16.16b, v17.16b, #8 \n" // r22 "fmla v9.4s, v18.4s, %14.s[3] \n" "ext v20.16b, v16.16b, v17.16b, #12 \n" // r23 "fmla v8.4s, v19.4s, %15.s[0] \n" // r3 "prfm pldl1keep, [%4, #256] \n" "ld1 {v10.4s, v11.4s}, [%4] \n" // v10 v11 = r30 r34 "fmla v9.4s, v20.4s, %15.s[1] \n" "add %4, %4, #16 \n" "fmla v8.4s, v10.4s, %15.s[3] \n" "ext v12.16b, v10.16b, 
v11.16b, #4 \n" // r31 "fmla v9.4s, v11.4s, %16.s[3] \n" "ext v13.16b, v10.16b, v11.16b, #8 \n" // r32 "fmla v8.4s, v12.4s, %16.s[0] \n" "ext v14.16b, v10.16b, v11.16b, #12 \n" // r33 "fmla v9.4s, v13.4s, %16.s[1] \n" // r4 "prfm pldl1keep, [%5, #256] \n" "ld1 {v16.4s, v17.4s}, [%5] \n" // v16 v17 = r40 r44 "fmla v8.4s, v14.4s, %16.s[2] \n" "add %5, %5, #16 \n" "fmla v9.4s, v16.4s, %17.s[0] \n" "ext v18.16b, v16.16b, v17.16b, #4 \n" // r41 "fmla v8.4s, v17.4s, %18.s[0] \n" "ext v19.16b, v16.16b, v17.16b, #8 \n" // r42 "fmla v9.4s, v18.4s, %17.s[1] \n" "ext v20.16b, v16.16b, v17.16b, #12 \n" // r43 "fmla v8.4s, v19.4s, %17.s[2] \n" "fmla v9.4s, v20.4s, %17.s[3] \n" "fadd v8.4s, v8.4s, v9.4s \n" "st1 {v8.4s}, [%0], #16 \n" : "=r"(outptr), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4) // %5 : "0"(outptr), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "w"(_k0123), // %12 "w"(_k4567), // %13 "w"(_k891011), // %14 "w"(_k12131415), // %15 "w"(_k16171819), // %16 "w"(_k20212223), // %17 "w"(_k24242424), // %18 "w"(_bias0) // %19 : "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20"); } #else if (nn > 0) { asm volatile( // r0 "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2] \n" // q10 q11 = r00 r04 "vmov q8, %q21 \n" // q8 = _bias0 "0: \n" "vmul.f32 q9, q10, %e14[0] \n" "vext.32 q12, q10, q11, #1 \n" // r01 "vmla.f32 q8, q11, %e15[0] \n" "vext.32 q13, q10, q11, #2 \n" // r02 "vmla.f32 q9, q12, %e14[1] \n" "vext.32 q12, q10, q11, #3 \n" // r03 "vmla.f32 q8, q13, %f14[0] \n" // r1 "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3] \n" // q14 q15 = r10 r14 "vmla.f32 q9, q12, %f14[1] \n" "add %3, #16 \n" "vmla.f32 q8, q14, %e15[1] \n" "vext.32 q12, q14, q15, #1 \n" // r11 "vmla.f32 q9, q15, %e16[1] \n" "vext.32 q13, q14, q15, #2 \n" // r12 "vmla.f32 q8, q12, %f15[0] \n" "vext.32 q12, q14, q15, #3 \n" // r13 "vmla.f32 q9, q13, %f15[1] \n" // r2 "pld [%4, #256] \n" "vld1.f32 {d20-d23}, [%4] \n" // q10 q11 = r20 
r24 "vmla.f32 q8, q12, %e16[0] \n" "add %4, #16 \n" "vmla.f32 q9, q10, %f16[0] \n" "vext.32 q12, q10, q11, #1 \n" // r21 "vmla.f32 q8, q11, %f17[0] \n" "vext.32 q13, q10, q11, #2 \n" // r22 "vmla.f32 q9, q12, %f16[1] \n" "vext.32 q12, q10, q11, #3 \n" // r23 "vmla.f32 q8, q13, %e17[0] \n" // r3 "pld [%5, #256] \n" "vld1.f32 {d28-d31}, [%5] \n" // q14 q15 = r30 r34 "vmla.f32 q9, q12, %e17[1] \n" "add %5, #16 \n" "vmla.f32 q8, q14, %f17[1] \n" "vext.32 q12, q14, q15, #1 \n" // r31 "vmla.f32 q9, q15, %f18[1] \n" "vext.32 q13, q14, q15, #2 \n" // r32 "vmla.f32 q8, q12, %e18[0] \n" "vext.32 q12, q14, q15, #3 \n" // r33 "vmla.f32 q9, q13, %e18[1] \n" // r4 "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6] \n" // q10 q11 = r40 r44 "vmla.f32 q8, q12, %f18[0] \n" "add %6, #16 \n" "vmla.f32 q9, q10, %e19[0] \n" "vext.32 q12, q10, q11, #1 \n" // r41 "vmla.f32 q8, q11, %e20[0] \n" "vext.32 q13, q10, q11, #2 \n" // r42 "vmla.f32 q9, q12, %e19[1] \n" "vext.32 q12, q10, q11, #3 \n" // r43 "vmla.f32 q8, q13, %f19[0] \n" "add %2, #16 \n" "vmla.f32 q9, q12, %f19[1] \n" // r0 "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2] \n" // q10 q11 = r00 r04 "vadd.f32 q9, q9, q8 \n" "vmov q8, %q21 \n" // q8 = _bias0 "subs %0, #1 \n" "vst1.f32 {d18-d19}, [%1]! 
\n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4) // %6 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "w"(_k0123), // %14 "w"(_k4567), // %15 "w"(_k891011), // %16 "w"(_k12131415), // %17 "w"(_k16171819), // %18 "w"(_k20212223), // %19 "w"(_k24242424), // %20 "w"(_bias0) // %21 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { #if __ARM_NEON #if __aarch64__ // TODO neon assembly optimize float sum = bias0; float32x4_t _r0 = vld1q_f32(r0); float32x4_t _sum = vmulq_f32(_r0, _k0123); float32x4_t _r1 = vld1q_f32(r1); _sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1)); float32x4_t _r2 = vld1q_f32(r2); _sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2)); float32x4_t _r3 = vld1q_f32(r3); _sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3)); float32x4_t _r4 = vld1q_f32(r4); _sum = vmlaq_f32(_sum, _r4, _k20212223); float32x4_t _k_t4; _k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0); _k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1); _k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2); _k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3); float32x4_t _r_t4; _r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0); _r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1); _r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2); _r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3); _sum = vmlaq_f32(_sum, _r_t4, _k_t4); sum += r4[4] * k4[4]; float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); _ss = vpadd_f32(_ss, _ss); sum += vget_lane_f32(_ss, 0); *outptr = sum; r0++; r1++; r2++; r3++; r4++; outptr++; #else // TODO neon assembly optimize asm volatile( "veor q14, q14 \n" "vext.32 q14, %q19, q14, #3 \n" // q14 = bias0 0 0 0 "vld1.f32 {d16-d17}, [%1] \n" // q8 = r00 r01 r02 r03 "vld1.f32 {d18-d19}, [%2] \n" // q9 = r10 r11 r12 r13(X) "add r4, %1, #16 \n" "vld1.f32 {d19[1]}, [r4] \n" "vext.32 q9, q9, q9, #3 \n" // q9 = r04 r10 r11 r12 "vmla.f32 q14, q8, %q12 \n" "add r4, %2, #12 \n" "vld1.f32 {d20}, 
[r4] \n" // d20 = r13 r14 "vld1.f32 {d21}, [%3] \n" // d21 = r20 r21 "vmla.f32 q14, q9, %q13 \n" "add r4, %3, #8 \n" "vld1.f32 {d22-d23}, [r4] \n" // q11 = r22 r23 r24 X "vld1.f32 {d23[1]}, [%4] \n" // q11 = r22 r23 r24 r30 "vmla.f32 q14, q10, %q14 \n" "add r4, %4, #4 \n" "vld1.f32 {d24-d25}, [r4] \n" // q12 = r31 r32 r33 r34 "vmla.f32 q14, q11, %q15 \n" "vld1.f32 {d26-d27}, [%5] \n" // q13 = r40 r41 r42 r43 "vmla.f32 q14, q12, %q16 \n" "veor d30, d30 \n" "add r4, %5, #16 \n" "vld1.f32 {d30[0]}, [r4] \n" // d30 = r44 0 "vmla.f32 q14, q13, %q17 \n" "vmla.f32 d28, d30, %e18 \n" "add %1, #4 \n" // h-sum "vadd.f32 d28, d28, d29 \n" "add %2, #4 \n" "add %3, #4 \n" "vpadd.f32 d28, d28, d28 \n" "add %4, #4 \n" "add %5, #4 \n" "vst1.f32 {d28[0]}, [%0]! \n" : "=r"(outptr), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4) // %5 : "0"(outptr), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "w"(_k0123), // %12 "w"(_k4567), // %13 "w"(_k891011), // %14 "w"(_k12131415), // %15 "w"(_k16171819), // %16 "w"(_k20212223), // %17 "w"(_k24242424), // %18 "w"(_bias0) // %19 : "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ #else float sum = bias0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; *outptr = sum; r0++; r1++; r2++; r3++; r4++; outptr++; #endif } r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; } } } static void convdw5x5s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const 
Option& opt) { int w = bottom_blob.w; //int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; //int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const int group = bottom_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); const float bias0 = bias ? bias[g] : 0.f; const float* kernel0 = kernel + g * 25; float* outptr = out; const float* img0 = bottom_blob.channel(g); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; const float* r3 = img0 + w * 3; const float* r4 = img0 + w * 4; const float* k0 = kernel0; const float* k1 = kernel0 + 5; const float* k2 = kernel0 + 10; const float* k3 = kernel0 + 15; const float* k4 = kernel0 + 20; #if __ARM_NEON float32x4_t _k0123 = vld1q_f32(kernel0); float32x4_t _k4567 = vld1q_f32(kernel0 + 4); float32x4_t _k891011 = vld1q_f32(kernel0 + 8); float32x4_t _k12131415 = vld1q_f32(kernel0 + 12); float32x4_t _k16171819 = vld1q_f32(kernel0 + 16); float32x4_t _k20212223 = vld1q_f32(kernel0 + 20); float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]); float32x4_t _bias0 = vdupq_n_f32(bias0); #endif // __ARM_NEON int i = 0; // NOTE unroll outh 2 results somewhat speed drop :| (about -4%) // so we do not implement it here for (; i < outh; i++) { #if __ARM_NEON #if __aarch64__ int nn = outw >> 3; int remain = outw & 7; #else int nn = outw >> 2; int remain = outw & 3; #endif // __aarch64__ #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( // r0 "prfm pldl1keep, [%2, #256] \n" "ld2 {v16.4s, v17.4s}, [%2], #32 \n" // v16 v17 = r00 r01 "mov v8.16b, %21.16b \n" // v8 = _bias0 "mov v9.16b, %21.16b \n" // v9 = _bias0 "prfm pldl1keep, [%2, #256] \n" "ld2 {v18.4s, v19.4s}, [%2], #32 \n" // v18 v19 = r08 r09 "0: \n" "fmul v10.4s, v16.4s, %14.s[0] \n" "prfm pldl1keep, [%2, #256] \n" "ld2 {v20.4s, v21.4s}, [%2] 
\n" // v20 v21 = r016 r017 "fmul v11.4s, v18.4s, %14.s[0] \n" "ext v22.16b, v16.16b, v18.16b, #4 \n" // v22 = r02 "fmla v8.4s, v17.4s, %14.s[1] \n" "ext v25.16b, v18.16b, v20.16b, #4 \n" // v25 = r010 "fmla v9.4s, v19.4s, %14.s[1] \n" "ext v23.16b, v17.16b, v19.16b, #4 \n" // v23 = r03 "fmla v10.4s, v22.4s, %14.s[2] \n" "ext v26.16b, v19.16b, v21.16b, #4 \n" // v26 = r011 "fmla v11.4s, v25.4s, %14.s[2] \n" "ext v24.16b, v16.16b, v18.16b, #8 \n" // v24 = r04 "fmla v8.4s, v23.4s, %14.s[3] \n" "ext v27.16b, v18.16b, v20.16b, #8 \n" // v27 = r012 "fmla v9.4s, v26.4s, %14.s[3] \n" // r1 "prfm pldl1keep, [%3, #256] \n" "ld2 {v12.4s, v13.4s}, [%3], #32 \n" // v12 v13 = r10 r11 "fmla v10.4s, v24.4s, %15.s[0] \n" "prfm pldl1keep, [%3, #256] \n" "ld2 {v14.4s, v15.4s}, [%3], #32 \n" // v14 v15 = r18 r19 "fmla v11.4s, v27.4s, %15.s[0] \n" "fmla v8.4s, v12.4s, %15.s[1] \n" "prfm pldl1keep, [%3, #256] \n" "ld2 {v20.4s, v21.4s}, [%3] \n" // v20 v21 = r116 r117 "fmla v9.4s, v14.4s, %15.s[1] \n" "ext v22.16b, v12.16b, v14.16b, #4 \n" // v22 = r12 "fmla v10.4s, v13.4s, %15.s[2] \n" "ext v25.16b, v14.16b, v20.16b, #4 \n" // v25 = r110 "fmla v11.4s, v15.4s, %15.s[2] \n" "ext v23.16b, v13.16b, v15.16b, #4 \n" // v23 = r13 "fmla v8.4s, v22.4s, %15.s[3] \n" "ext v26.16b, v15.16b, v21.16b, #4 \n" // v26 = r111 "fmla v9.4s, v25.4s, %15.s[3] \n" "ext v24.16b, v12.16b, v14.16b, #8 \n" // v24 = r14 "fmla v10.4s, v23.4s, %16.s[0] \n" "ext v27.16b, v14.16b, v20.16b, #8 \n" // v27 = r112 "fmla v11.4s, v26.4s, %16.s[0] \n" // r2 "prfm pldl1keep, [%4, #256] \n" "ld2 {v16.4s, v17.4s}, [%4], #32 \n" // v16 v17 = r20 r21 "fmla v8.4s, v24.4s, %16.s[1] \n" "prfm pldl1keep, [%4, #256] \n" "ld2 {v18.4s, v19.4s}, [%4], #32 \n" // v18 v19 = r28 r29 "fmla v9.4s, v27.4s, %16.s[1] \n" "fmla v10.4s, v16.4s, %16.s[2] \n" "prfm pldl1keep, [%4, #256] \n" "ld2 {v20.4s, v21.4s}, [%4] \n" // v20 v21 = r216 r217 "fmla v11.4s, v18.4s, %16.s[2] \n" "ext v22.16b, v16.16b, v18.16b, #4 \n" // v22 = r22 "fmla v8.4s, 
v17.4s, %16.s[3] \n" "ext v25.16b, v18.16b, v20.16b, #4 \n" // v25 = r210 "fmla v9.4s, v19.4s, %16.s[3] \n" "ext v23.16b, v17.16b, v19.16b, #4 \n" // v23 = r23 "fmla v10.4s, v22.4s, %17.s[0] \n" "ext v26.16b, v19.16b, v21.16b, #4 \n" // v26 = r211 "fmla v11.4s, v25.4s, %17.s[0] \n" "ext v24.16b, v16.16b, v18.16b, #8 \n" // v24 = r24 "fmla v8.4s, v23.4s, %17.s[1] \n" "ext v27.16b, v18.16b, v20.16b, #8 \n" // v27 = r212 "fmla v9.4s, v26.4s, %17.s[1] \n" // r3 "prfm pldl1keep, [%5, #256] \n" "ld2 {v12.4s, v13.4s}, [%5], #32 \n" // v12 v13 = r30 r31 "fmla v10.4s, v24.4s, %17.s[2] \n" "prfm pldl1keep, [%5, #256] \n" "ld2 {v14.4s, v15.4s}, [%5], #32 \n" // v14 v15 = r38 r39 "fmla v11.4s, v27.4s, %17.s[2] \n" "fmla v8.4s, v12.4s, %17.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "ld2 {v20.4s, v21.4s}, [%5] \n" // v20 v21 = r316 r317 "fmla v9.4s, v14.4s, %17.s[3] \n" "ext v22.16b, v12.16b, v14.16b, #4 \n" // v22 = r32 "fmla v10.4s, v13.4s, %18.s[0] \n" "ext v25.16b, v14.16b, v20.16b, #4 \n" // v25 = r310 "fmla v11.4s, v15.4s, %18.s[0] \n" "ext v23.16b, v13.16b, v15.16b, #4 \n" // v23 = r33 "fmla v8.4s, v22.4s, %18.s[1] \n" "ext v26.16b, v15.16b, v21.16b, #4 \n" // v26 = r311 "fmla v9.4s, v25.4s, %18.s[1] \n" "ext v24.16b, v12.16b, v14.16b, #8 \n" // v24 = r34 "fmla v10.4s, v23.4s, %18.s[2] \n" "ext v27.16b, v14.16b, v20.16b, #8 \n" // v27 = r312 "fmla v11.4s, v26.4s, %18.s[2] \n" // r4 "prfm pldl1keep, [%6, #256] \n" "ld2 {v16.4s, v17.4s}, [%6], #32 \n" // v16 v17 = r40 r41 "fmla v8.4s, v24.4s, %18.s[3] \n" "prfm pldl1keep, [%6, #256] \n" "ld2 {v18.4s, v19.4s}, [%6], #32 \n" // v18 v19 = r48 r49 "fmla v9.4s, v27.4s, %18.s[3] \n" "fmla v10.4s, v16.4s, %19.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld2 {v20.4s, v21.4s}, [%6] \n" // v20 v21 = r416 r417 "fmla v11.4s, v18.4s, %19.s[0] \n" "ext v22.16b, v16.16b, v18.16b, #4 \n" // v22 = r42 "fmla v8.4s, v17.4s, %19.s[1] \n" "ext v25.16b, v18.16b, v20.16b, #4 \n" // v25 = r410 "fmla v9.4s, v19.4s, %19.s[1] \n" "ext v23.16b, v17.16b, 
v19.16b, #4 \n" // v23 = r43 "fmla v10.4s, v22.4s, %19.s[2] \n" "ext v26.16b, v19.16b, v21.16b, #4 \n" // v26 = r411 "fmla v11.4s, v25.4s, %19.s[2] \n" "ext v24.16b, v16.16b, v18.16b, #8 \n" // v24 = r44 "fmla v8.4s, v23.4s, %19.s[3] \n" "ext v27.16b, v18.16b, v20.16b, #8 \n" // v27 = r412 "fmla v9.4s, v26.4s, %19.s[3] \n" "fmla v10.4s, v24.4s, %20.s[0] \n" // r0 "prfm pldl1keep, [%2, #256] \n" "ld2 {v16.4s, v17.4s}, [%2], #32 \n" // v16 v17 = r00 r01 "fmla v11.4s, v27.4s, %20.s[0] \n" "prfm pldl1keep, [%2, #256] \n" "ld2 {v18.4s, v19.4s}, [%2], #32 \n" // v18 v19 = r08 r09 "fadd v10.4s, v8.4s, v10.4s \n" "fadd v11.4s, v9.4s, v11.4s \n" "subs %w0, %w0, #1 \n" "mov v8.16b, %21.16b \n" // v8 = _bias0 "mov v9.16b, %21.16b \n" // v9 = _bias0 "st1 {v10.4s, v11.4s}, [%1], #32 \n" "bne 0b \n" "sub %2, %2, #64 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4) // %6 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "w"(_k0123), // %14 "w"(_k4567), // %15 "w"(_k891011), // %16 "w"(_k12131415), // %17 "w"(_k16171819), // %18 "w"(_k20212223), // %19 "w"(_k24242424), // %20 "w"(_bias0) // %21 : "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } #else if (nn > 0) { asm volatile( // r0 "pld [%2, #256] \n" "vld2.f32 {d20-d23}, [%2]! \n" // q10 q11 = r00 r01 "vmov q8, %q21 \n" "pld [%2, #128] \n" "vld2.f32 {d24-d25}, [%2] \n" // q12 = r08 x x "0: \n" "vmul.f32 q9, q10, %e14[0] \n" "vmov d26, d25 \n" // q13 = r09 x x "vext.32 q14, q10, q12, #1 \n" // q14 = r02 "vmla.f32 q8, q11, %e14[1] \n" "vext.32 q15, q11, q13, #1 \n" // q15 = r03 "vmla.f32 q9, q14, %f14[0] \n" "vext.32 q14, q10, q12, #2 \n" // q14 = r04 "vmla.f32 q8, q15, %f14[1] \n" // r1 "pld [%3, #256] \n" "vld2.f32 {d20-d23}, [%3]! 
\n" // q10 q11 = r10 r11 "vmla.f32 q9, q14, %e15[0] \n" "pld [%3, #128] \n" "vld2.f32 {d24-d25}, [%3] \n" // q12 = r18 x x "vmla.f32 q8, q10, %e15[1] \n" "vmov d26, d25 \n" // q13 = r19 x x "vext.32 q14, q10, q12, #1 \n" // q14 = r12 "vmla.f32 q9, q11, %f15[0] \n" "vext.32 q15, q11, q13, #1 \n" // q15 = r13 "vmla.f32 q8, q14, %f15[1] \n" "vext.32 q14, q10, q12, #2 \n" // q14 = r14 "vmla.f32 q9, q15, %e16[0] \n" // r2 "pld [%4, #256] \n" "vld2.f32 {d20-d23}, [%4]! \n" // q10 q11 = r20 r21 "vmla.f32 q8, q14, %e16[1] \n" "pld [%4, #128] \n" "vld2.f32 {d24-d25}, [%4] \n" // q12 = r28 x x "vmla.f32 q9, q10, %f16[0] \n" "vmov d26, d25 \n" // q13 = r29 x x "vext.32 q14, q10, q12, #1 \n" // q14 = r22 "vmla.f32 q8, q11, %f16[1] \n" "vext.32 q15, q11, q13, #1 \n" // q15 = r23 "vmla.f32 q9, q14, %e17[0] \n" "vext.32 q14, q10, q12, #2 \n" // q14 = r24 "vmla.f32 q8, q15, %e17[1] \n" // r3 "pld [%5, #256] \n" "vld2.f32 {d20-d23}, [%5]! \n" // q10 q11 = r30 r31 "vmla.f32 q9, q14, %f17[0] \n" "pld [%5, #128] \n" "vld2.f32 {d24-d25}, [%5] \n" // q12 = r38 x x "vmla.f32 q8, q10, %f17[1] \n" "vmov d26, d25 \n" // q13 = r39 x x "vext.32 q14, q10, q12, #1 \n" // q14 = r32 "vmla.f32 q9, q11, %e18[0] \n" "vext.32 q15, q11, q13, #1 \n" // q15 = r33 "vmla.f32 q8, q14, %e18[1] \n" "vext.32 q14, q10, q12, #2 \n" // q14 = r34 "vmla.f32 q9, q15, %f18[0] \n" // r4 "pld [%6, #256] \n" "vld2.f32 {d20-d23}, [%6]! \n" // q10 q11 = r40 r41 "vmla.f32 q8, q14, %f18[1] \n" "pld [%6, #128] \n" "vld2.f32 {d24-d25}, [%6] \n" // q12 = r48 x x "vmla.f32 q9, q10, %e19[0] \n" "vmov d26, d25 \n" // q13 = r49 x x "vext.32 q14, q10, q12, #1 \n" // q14 = r42 "vmla.f32 q8, q11, %e19[1] \n" "vext.32 q15, q11, q13, #1 \n" // q15 = r43 "vmla.f32 q9, q14, %f19[0] \n" "vext.32 q14, q10, q12, #2 \n" // q14 = r44 "vmla.f32 q8, q15, %f19[1] \n" // r0 "pld [%2, #256] \n" "vld2.f32 {d20-d23}, [%2]! 
\n" // q10 q11 = r00 r01 "vmla.f32 q9, q14, %e20[0] \n" "pld [%2, #128] \n" "vld2.f32 {d24-d25}, [%2] \n" // q12 = r08 x x "vadd.f32 q9, q8, q9 \n" "vmov q8, %q21 \n" "subs %0, #1 \n" "vst1.f32 {d18-d19}, [%1]! \n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4) // %6 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "w"(_k0123), // %14 "w"(_k4567), // %15 "w"(_k891011), // %16 "w"(_k12131415), // %17 "w"(_k16171819), // %18 "w"(_k20212223), // %19 "w"(_k24242424), // %20 "w"(_bias0) // %21 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { float sum = bias0; #if __ARM_NEON // TODO neon assembly optimize float32x4_t _r0 = vld1q_f32(r0); float32x4_t _sum = vmulq_f32(_r0, _k0123); float32x4_t _r1 = vld1q_f32(r1); _sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1)); float32x4_t _r2 = vld1q_f32(r2); _sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2)); float32x4_t _r3 = vld1q_f32(r3); _sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3)); float32x4_t _r4 = vld1q_f32(r4); _sum = vmlaq_f32(_sum, _r4, _k20212223); sum += r0[4] * k0[4]; sum += r1[4] * k1[4]; sum += r2[4] * k2[4]; sum += r3[4] * k3[4]; sum += r4[4] * k4[4]; float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); _ss = vpadd_f32(_ss, _ss); sum += vget_lane_f32(_ss, 0); #else sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; 
#endif *outptr = sum; r0 += 2; r1 += 2; r2 += 2; r3 += 2; r4 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; r4 += tailstep; } } }
// ---- GB_binop__bset_uint8.c ----
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bset_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__bset_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__bset_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__bset_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bset_uint8) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bset_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__bset_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bset_uint8) // C=scalar+B GB (_bind1st__bset_uint8) // C=scalar+B' GB (_bind1st_tran__bset_uint8) // C=A+scalar GB (_bind2nd__bset_uint8) // C=A'+scalar GB (_bind2nd_tran__bset_uint8) // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = GB_BITSET (aij, bij, uint8_t, 8) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are 
identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_BITSET (x, y, uint8_t, 8) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BSET || GxB_NO_UINT8 || GxB_NO_BSET_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bset_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bset_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bset_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bset_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bset_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix 
A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bset_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bset_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bset_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bset_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_BITSET (x, bij, uint8_t, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bset_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = GB_BITSET (aij, y, uint8_t, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITSET (x, aij, uint8_t, 8) ; \ } GrB_Info GB (_bind1st_tran__bset_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITSET (aij, y, uint8_t, 8) ; \ } GrB_Info GB (_bind2nd_tran__bset_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
transpose.c
/**
 * Parallel in-place NxN matrix transpose.
 *
 * TODO: Variable primitive data types (supporting long, double, etc.).
 * TODO: NxM matrix transpose (in-place O(1) NxM transpose is non-trivial;
 *       see the note on transpose() below).
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h> /* only needed when building with OpenMP (-fopenmp) */
#endif

typedef struct Matrix {
    uint16_t rows;  /* number of rows */
    uint16_t cols;  /* number of columns */
    uint8_t** data; /* `rows` row pointers; data[r][c] is the element at (r, c) */
} Matrix;

/**
 * @brief Initialize a matrix (of size `[n_rows, n_cols]`) with zeros.
 *
 * Exits the process with a message if allocation fails (the previous code
 * dereferenced unchecked malloc() results).
 *
 * @param n_rows Number of rows.
 * @param n_cols Number of columns.
 * @returns Matrix of zeros; release with free_matrix().
 */
Matrix* zeros(uint16_t n_rows, uint16_t n_cols) {
    Matrix* matrix = malloc(sizeof *matrix);
    uint8_t** data = malloc(n_rows * sizeof *data);
    if (matrix == NULL || data == NULL) {
        fprintf(stderr, "zeros: out of memory\n");
        exit(EXIT_FAILURE);
    }

    matrix->rows = n_rows;
    matrix->cols = n_cols;
    for (uint16_t x = 0; x < n_rows; x++) {
        data[x] = calloc(n_cols, sizeof *data[x]); /* calloc zero-fills the row */
        if (data[x] == NULL) {
            fprintf(stderr, "zeros: out of memory\n");
            exit(EXIT_FAILURE);
        }
    }
    matrix->data = data;
    return matrix;
}

/**
 * @brief Create a matrix (2D tensor) with data provided.
 *
 * @param data Data to be inserted in row-major order (n_rows*n_cols values).
 * @param n_rows Number of rows.
 * @param n_cols Number of columns.
 * @returns Matrix of size [n_rows, n_cols] with the values copied in.
 */
Matrix* fill(uint8_t* data, uint16_t n_rows, uint16_t n_cols) {
    Matrix* matrix = zeros(n_rows, n_cols);
    for (uint16_t x = 0; x < n_rows; x++) {
        for (uint16_t y = 0; y < n_cols; y++) {
            matrix->data[x][y] = data[(size_t)n_cols * x + y];
        }
    }
    return matrix;
}

/// @brief Free an allocated matrix: each row, the row-pointer array, the struct.
void free_matrix(Matrix* m) {
    if (m == NULL) {
        return;
    }
    for (uint16_t i = 0; i < m->rows; i++) {
        free(m->data[i]);
    }
    free(m->data);
    free(m);
}

/// @brief Print matrix, one tab-separated line per row.
void print_matrix(Matrix* m) {
    for (uint16_t x = 0; x < m->rows; x++) {
        for (uint16_t y = 0; y < m->cols; y++) {
            printf("%d\t", m->data[x][y]);
        }
        printf("\n");
    }
    printf("\n");
}

/**
 * @brief In-place NxN matrix transpose.
 *
 * Only square matrices are supported: an in-place O(1)-space NxM transpose
 * (following the index cycles between a = M*n + m and a' = N*m + n in
 * row-major storage) is non-trivial and left for future work. Non-square
 * input is now rejected instead of reading out of bounds.
 *
 * Each (i, j)/(j, i) pair below the diagonal is swapped exactly once; rows
 * of the outer loop touch disjoint pairs, so it parallelizes safely.
 *
 * @param arr Square matrix to be transposed in place.
 */
void transpose(Matrix* arr) {
    if (arr == NULL || arr->rows != arr->cols) {
        fprintf(stderr, "transpose: only square (NxN) matrices are supported\n");
        return;
    }

#ifdef _OPENMP
    /* Use only as many threads as are available. */
    omp_set_num_threads(omp_get_num_procs());
#endif
    #pragma omp parallel for
    for (uint16_t i = 1; i < arr->rows; i++) {
        for (uint16_t j = 0; j < i; j++) {
            uint8_t tmp = arr->data[i][j];
            arr->data[i][j] = arr->data[j][i];
            arr->data[j][i] = tmp;
        }
    }
}

int main(int argc, char *argv[]) {
    /* Previous code read argv[1] without checking argc (crash on no args). */
    if (argc < 2) {
        fprintf(stderr, "usage: %s N\n", argv[0]);
        return EXIT_FAILURE;
    }

    char *end = NULL;
    long n = strtol(argv[1], &end, 10); /* strtol rejects garbage; atoi cannot */
    if (end == argv[1] || *end != '\0' || n <= 0 || n > UINT16_MAX) {
        fprintf(stderr, "N must be an integer in [1, %u]\n", (unsigned) UINT16_MAX);
        return EXIT_FAILURE;
    }
    uint16_t N = (uint16_t) n;

    /* Heap buffer: the previous stack VLA (uint8_t a[N*N]) could demand up
     * to ~4 GiB of stack at N = 65535 and overflow. */
    uint8_t* a = malloc((size_t) N * N);
    if (a == NULL) {
        fprintf(stderr, "out of memory\n");
        return EXIT_FAILURE;
    }
    for (uint32_t i = 0; i < (uint32_t) N * N; i++) {
        a[i] = (uint8_t) (rand() % 256); /* random byte in [0, 255]; unseeded, so reproducible */
    }

    Matrix* orig = fill(a, N, N);
    free(a); /* fill() copies, so the staging buffer can be released */

    transpose(orig);

    free_matrix(orig);
    return EXIT_SUCCESS;
}
DRB029-truedep1-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* This program has data races due to true dependence within the loop at 63.
Data race pair: a[i+1]@64:5 vs. a[i]@64:12
*/

#include <stdlib.h>
#include <stdio.h>

/*
 * DataRaceBench kernel (DRB029, "truedep1-orig-yes"): the data race below is
 * INTENTIONAL — this file is ground truth for race-detection tools, so the
 * race must not be "fixed". NOTE(review): the line/column references in the
 * comment above come from the upstream file layout and may not match this
 * copy's line numbers — verify against the DataRaceBench distribution.
 */
int main(int argc, char* argv[])
{
  int i;
  int len=100;
  int a[100];

  /* Sequential initialization: a[i] = i. */
  for (i=0;i<len;i++)
    a[i]=i;

  /* Loop-carried true dependence: iteration i reads a[i], which iteration
   * i-1 writes as a[(i-1)+1]. Running this under "parallel for" is therefore
   * a genuine data race (the pair named in the header comment). */
#pragma omp parallel for schedule(dynamic)
  for (i=0;i<len-1;i++)
    a[i+1]=a[i]+1;

  /* With sequential execution the expected output is a[50] = 50; under the
   * race the value may differ from run to run. */
  printf("a[50]=%d\n", a[50]);
  return 0;
}
convolution_1x1.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __ARM_NEON #include <arm_neon.h> #endif // __ARM_NEON static void conv1x1s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; out.fill(bias0); int q = 0; for (; q+3<inch; q+=4) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q+1); const float* img2 = bottom_blob.channel(q+2); const float* img3 = bottom_blob.channel(q+3); const float* kernel0 = kernel + p*inch + q; const float k0 = kernel0[0]; const float k1 = kernel0[1]; const float k2 = kernel0[2]; const float k3 = kernel0[3]; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; int size = outw * outh; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); #if __aarch64__ for (; nn>0; nn--) { float32x4_t _p = vld1q_f32(r0); float32x4_t _pn = vld1q_f32(r0+4); float32x4_t _outp = vld1q_f32(outptr); float32x4_t _outpn = vld1q_f32(outptr+4); _outp = vfmaq_f32(_outp, _p, _k0); _outpn = vfmaq_f32(_outpn, _pn, _k0); float32x4_t _p1 = vld1q_f32(r1); float32x4_t _p1n = vld1q_f32(r1+4); _outp = vfmaq_f32(_outp, _p1, _k1); _outpn = vfmaq_f32(_outpn, _p1n, _k1); float32x4_t _p2 = vld1q_f32(r2); float32x4_t _p2n = vld1q_f32(r2+4); _outp = vfmaq_f32(_outp, _p2, _k2); _outpn = vfmaq_f32(_outpn, _p2n, _k2); float32x4_t _p3 = vld1q_f32(r3); float32x4_t _p3n = vld1q_f32(r3+4); _outp = vfmaq_f32(_outp, _p3, _k3); _outpn = vfmaq_f32(_outpn, _p3n, _k3); vst1q_f32(outptr, _outp); vst1q_f32(outptr+4, _outpn); r0 += 8; r1 += 8; r2 += 8; r3 += 8; outptr += 8; } #else if (nn > 0) { asm volatile( "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128] \n" "vmla.f32 q0, q2, %q12 \n" "vmla.f32 q1, q3, %q12 \n" "pld [%3, #256] \n" "vld1.f32 {d4-d7}, [%3 :128]! \n" "vmla.f32 q0, q2, %q13 \n" "vmla.f32 q1, q3, %q13 \n" "pld [%4, #256] \n" "vld1.f32 {d4-d7}, [%4 :128]! 
\n" "vmla.f32 q0, q2, %q14 \n" "vmla.f32 q1, q3, %q14 \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q0, q2, %q15 \n" "vmla.f32 q1, q3, %q15 \n" "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "q0", "q1", "q2", "q3" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = *r0 * k0; float sum1 = *r1 * k1; float sum2 = *r2 * k2; float sum3 = *r3 * k3; *outptr += sum + sum1 + sum2 + sum3; r0++; r1++; r2++; r3++; outptr++; } } for (; q<inch; q++) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch + q; const float k0 = kernel0[0]; const float* r0 = img0; int size = outw * outh; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); #if __aarch64__ for (; nn>0; nn--) { float32x4_t _p = vld1q_f32(r0); float32x4_t _outp = vld1q_f32(outptr); float32x4_t _pn = vld1q_f32(r0+4); float32x4_t _outpn = vld1q_f32(outptr+4); _outp = vfmaq_f32(_outp, _p, _k0); _outpn = vfmaq_f32(_outpn, _pn, _k0); vst1q_f32(outptr, _outp); vst1q_f32(outptr+4, _outpn); r0 += 8; outptr += 8; } #else if (nn > 0) { asm volatile( "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128] \n" "vmla.f32 q0, q2, %q6 \n" "vmla.f32 q1, q3, %q6 \n" "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1 :128]! 
\n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0) // %2 : "0"(nn), "1"(outptr), "2"(r0), "w"(_k0) // %6 : "cc", "memory", "q0", "q1", "q2", "q3" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = *r0 * k0; *outptr += sum; r0++; outptr++; } } } } static void conv1x1s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2*outw + w; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); int q = 0; for (; q+3<inch; q+=4) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q+1); const float* img2 = bottom_blob.channel(q+2); const float* img3 = bottom_blob.channel(q+3); const float* kernel0 = kernel + p*inch + q; const float k0 = kernel0[0]; const float k1 = kernel0[1]; const float k2 = kernel0[2]; const float k3 = kernel0[3]; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; for (int i = 0; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); #if __aarch64__ for (; nn>0; nn--) { float32x4x2_t _px2 = vld2q_f32(r0); float32x4_t _p = _px2.val[0]; float32x4_t _outp = vld1q_f32(outptr); float32x4x2_t _pnx2 = vld2q_f32(r0+8); float32x4_t _pn = _pnx2.val[0]; float32x4_t _outpn = vld1q_f32(outptr+4); _outp = vmlaq_f32(_outp, _p, _k0); _outpn = vmlaq_f32(_outpn, _pn, _k0); float32x4x2_t _p1x2 = vld2q_f32(r1); float32x4_t _p1 = _p1x2.val[0]; float32x4x2_t 
_p1nx2 = vld2q_f32(r1+8); float32x4_t _p1n = _p1nx2.val[0]; _outp = vmlaq_f32(_outp, _p1, _k1); _outpn = vmlaq_f32(_outpn, _p1n, _k1); float32x4x2_t _p2x2 = vld2q_f32(r2); float32x4_t _p2 = _p2x2.val[0]; float32x4x2_t _p2nx2 = vld2q_f32(r2+8); float32x4_t _p2n = _p2nx2.val[0]; _outp = vmlaq_f32(_outp, _p2, _k2); _outpn = vmlaq_f32(_outpn, _p2n, _k2); float32x4x2_t _p3x2 = vld2q_f32(r3); float32x4_t _p3 = _p3x2.val[0]; float32x4x2_t _p3nx2 = vld2q_f32(r3+8); float32x4_t _p3n = _p3nx2.val[0]; _outp = vmlaq_f32(_outp, _p3, _k3); _outpn = vmlaq_f32(_outpn, _p3n, _k3); vst1q_f32(outptr, _outp); vst1q_f32(outptr+8, _outpn); r0 += 16; r1 += 16; r2 += 16; r3 += 16; outptr += 8; } #else if (nn > 0) { asm volatile( "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1] \n" "vmla.f32 q0, q2, %q12 \n" "vmla.f32 q1, q8, %q12 \n" "pld [%3, #512] \n" "vld2.f32 {d4-d7}, [%3]! \n" "vld2.f32 {d16-d19}, [%3]! \n" "vmla.f32 q0, q2, %q13 \n" "vmla.f32 q1, q8, %q13 \n" "pld [%4, #512] \n" "vld2.f32 {d4-d7}, [%4]! \n" "vld2.f32 {d16-d19}, [%4]! \n" "vmla.f32 q0, q2, %q14 \n" "vmla.f32 q1, q8, %q14 \n" "pld [%5, #512] \n" "vld2.f32 {d4-d7}, [%5]! \n" "vld2.f32 {d16-d19}, [%5]! \n" "vmla.f32 q0, q2, %q15 \n" "vmla.f32 q1, q8, %q15 \n" "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1]! 
\n" "bne 0b \n" "sub %2, #64 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = *r0 * k0; float sum1 = *r1 * k1; float sum2 = *r2 * k2; float sum3 = *r3 * k3; *outptr += sum + sum1 + sum2 + sum3; r0 += 2; r1 += 2; r2 += 2; r3 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; } } for (; q<inch; q++) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch + q; const float k0 = kernel0[0]; const float* r0 = img0; for (int i = 0; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); #if __aarch64__ for (; nn>0; nn--) { float32x4x2_t _px2 = vld2q_f32(r0); float32x4_t _p = _px2.val[0]; float32x4_t _outp = vld1q_f32(outptr); float32x4x2_t _pnx2 = vld2q_f32(r0+8); float32x4_t _pn = _pnx2.val[0]; float32x4_t _outpn = vld1q_f32(outptr+4); _outp = vmlaq_f32(_outp, _p, _k0); _outpn = vmlaq_f32(_outpn, _pn, _k0); vst1q_f32(outptr, _outp); vst1q_f32(outptr+4, _outpn); r0 += 16; outptr += 8; } #else if (nn > 0) { asm volatile( "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1] \n" "vmla.f32 q0, q2, %q6 \n" "vmla.f32 q1, q8, %q6 \n" "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1]! 
\n" "bne 0b \n" "sub %2, #64 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0) // %2 : "0"(nn), "1"(outptr), "2"(r0), "w"(_k0) // %6 : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = *r0 * k0; *outptr += sum; r0 += 2; outptr++; } r0 += tailstep; } } } }
parallel_master_taskloop_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -triple x86_64-unknown-unknown -verify %s -Wuninitialized

// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -triple x86_64-unknown-unknown -verify %s -Wuninitialized

//
// Diagnostics test for '#pragma omp parallel master taskloop', driven by
// clang's -verify harness: every "expected-error/warning/note" comment below
// is a directive that must match a diagnostic emitted on the line it targets
// (the same line, or a relative "@+N" line offset). Do not insert or delete
// lines between a directive and its target, or the test will fail.
//

void xxx(int argc) {
  int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp parallel master taskloop
  for (int i = 0; i < 10; ++i)
    argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel master taskloop'}}
#pragma omp parallel master taskloop

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel master taskloop'}}
#pragma omp parallel master taskloop foo

void test_no_clause(void) {
  int i;
#pragma omp parallel master taskloop
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+2 {{statement after '#pragma omp parallel master taskloop' must be a for loop}}
#pragma omp parallel master taskloop
  ++i;
}

void test_branch_protected_scope(void) {
  int i = 0;
L1:
  ++i;

  int x[24];

#pragma omp parallel
#pragma omp parallel master taskloop
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }

  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}

void test_invalid_clause(void) {
  int i, a;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel master taskloop' are ignored}}
#pragma omp parallel master taskloop foo bar
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{directive '#pragma omp parallel master taskloop' cannot contain more than one 'nogroup' clause}}
#pragma omp parallel master taskloop nogroup nogroup
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{unexpected OpenMP clause 'in_reduction' in directive '#pragma omp parallel master taskloop'}}
#pragma omp parallel master taskloop in_reduction(+:a)
  for (i = 0; i < 16; ++i)
    ;
}

void test_non_identifiers(void) {
  int i, x;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel master taskloop' are ignored}}
#pragma omp parallel master taskloop;
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+3 {{extra tokens at the end of '#pragma omp parallel master taskloop' are ignored}}
// expected-error@+2 {{unexpected OpenMP clause 'linear' in directive '#pragma omp parallel master taskloop'}}
#pragma omp parallel
#pragma omp parallel master taskloop linear(x);
  for (i = 0; i < 16; ++i)
    ;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel master taskloop' are ignored}}
#pragma omp parallel master taskloop private(x);
  for (i = 0; i < 16; ++i)
    ;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel master taskloop' are ignored}}
#pragma omp parallel master taskloop, private(x);
  for (i = 0; i < 16; ++i)
    ;
}

extern int foo(void);

void test_collapse(void) {
  int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp parallel master taskloop collapse
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel master taskloop collapse(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop collapse()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel master taskloop collapse(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel master taskloop collapse(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel master taskloop' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp parallel master taskloop collapse 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop', but found only 1}}
#pragma omp parallel
#pragma omp parallel master taskloop collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{integer constant expression}}
#pragma omp parallel master taskloop collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{integer constant expression}}
#pragma omp parallel master taskloop collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel master taskloop collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel master taskloop collapse(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel master taskloop collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}

void test_private(void) {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel master taskloop private(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop private(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop private(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop private()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop private(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp parallel master taskloop private(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel
#pragma omp parallel master taskloop private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp parallel master taskloop private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp parallel master taskloop private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}

void test_lastprivate(void) {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop lastprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop lastprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp parallel master taskloop lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel
#pragma omp parallel master taskloop lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp parallel master taskloop lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp parallel master taskloop lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}

void test_firstprivate(void) {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop firstprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop firstprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp parallel master taskloop firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel
#pragma omp parallel master taskloop lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp parallel master taskloop lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp parallel master taskloop lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}

void test_loop_messages(void) {
  float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel master taskloop
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel master taskloop
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }

// expected-warning@+2 {{OpenMP loop iteration variable cannot have more than 64 bits size and will be narrowed}}
#pragma omp parallel master taskloop
  for (__int128 ii = 0; ii < 10; ii++) {
    c[ii] = a[ii] + b[ii];
  }
}
pmm-OpenMP.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <limits.h>

#ifdef _OPENMP
#include <omp.h>
#else
/* Serial fallbacks so the program still builds without -fopenmp. */
#define omp_get_thread_num() 0
#define omp_get_num_threads() 1
#define omp_set_num_threads(n)
#define omp_in_parallel() 0
#define omp_set_dynamic(n)
/* FIX: the original stubs did not cover omp_get_wtime(), so a non-OpenMP
 * build failed to link. Approximate wall time with CPU time from <time.h>. */
#define omp_get_wtime() ((double)clock() / CLOCKS_PER_SEC)
#endif

/*
 * Parallel dense matrix multiplication benchmark: ma = mb * mc with N x N
 * integer matrices. Usage: ./pmm N
 * Prints the elapsed time plus the first and last element of the result.
 */
int main(int argc, char **argv)
{
    int i, j, k;

    /* Command-line argument: matrix dimension. */
    if (argc < 2) {
        fprintf(stderr, "Falta tamaño de filas/columnas\n");
        exit(-1);
    }
    /* FIX: atoi() cannot report errors; bad input yielded N == 0 and the
     * final printf then read ma[N-1] out of bounds. Validate with strtol. */
    long parsed = strtol(argv[1], NULL, 10);
    if (parsed <= 0 || parsed > INT_MAX) {
        fprintf(stderr, "Tamaño inválido de filas/columnas\n");
        exit(-1);
    }
    unsigned int N = (unsigned int)parsed;

    /* Allocate the three matrices as arrays of row pointers.
     * FIX: malloc results were previously used unchecked. */
    int **ma, **mb, **mc;
    ma = malloc(N * sizeof(int *));
    mb = malloc(N * sizeof(int *));
    mc = malloc(N * sizeof(int *));
    if (ma == NULL || mb == NULL || mc == NULL) {
        fprintf(stderr, "Sin memoria suficiente\n");
        exit(-1);
    }
    for (i = 0; i < N; i++) {
        ma[i] = malloc(N * sizeof(int));
        mb[i] = malloc(N * sizeof(int));
        mc[i] = malloc(N * sizeof(int));
        if (ma[i] == NULL || mb[i] == NULL || mc[i] == NULL) {
            fprintf(stderr, "Sin memoria suficiente\n");
            exit(-1);
        }
    }

    /* Initialisation: ma accumulates the result; mb and mc are the operands. */
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            ma[i][j] = 0;
            mb[i][j] = 6;
            mc[i][j] = 4;
        }
    }

    double t1, t2, t_total;
    t1 = omp_get_wtime();

    /* Multiplication: rows of the result are independent, so the outer loop
     * is parallelised; j and k must be private to each thread. */
    #pragma omp parallel for private(j,k)
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            for (k = 0; k < N; k++) {
                ma[i][j] += mb[i][k] * mc[k][j];
            }
        }
    }

    t2 = omp_get_wtime();
    t_total = t2 - t1;

    /* Report time and the first/last element of the result matrix (ma). */
    printf("Tiempo = %11.9f\t Primera = %d\t Ultima=%d\n", t_total, ma[0][0], ma[N-1][N-1]);

    /* Release all rows, then the pointer arrays themselves. */
    for (i = 0; i < N; i++) {
        free(ma[i]);
        free(mb[i]);
        free(mc[i]);
    }
    free(ma);
    free(mb);
    free(mc);

    return 0;
}
enforce_detgammabar_constraint.h
void enforce_detgammabar_constraint(rfm_struct *restrict rfmstruct, const paramstruct *restrict params, REAL *restrict in_gfs) { #include "set_Cparameters.h" #pragma omp parallel for for(int i2=0; i2<Nxx_plus_2NGHOSTS2; i2++) { #include "rfm_files/rfm_struct__read2.h" for(int i1=0; i1<Nxx_plus_2NGHOSTS1; i1++) { #include "rfm_files/rfm_struct__read1.h" for(int i0=0; i0<Nxx_plus_2NGHOSTS0; i0++) { #include "rfm_files/rfm_struct__read0.h" /* * NRPy+ Finite Difference Code Generation, Step 1 of 1: Read from main memory and compute finite difference stencils: */ const double hDD00 = in_gfs[IDX4S(HDD00GF, i0,i1,i2)]; const double hDD01 = in_gfs[IDX4S(HDD01GF, i0,i1,i2)]; const double hDD02 = in_gfs[IDX4S(HDD02GF, i0,i1,i2)]; const double hDD11 = in_gfs[IDX4S(HDD11GF, i0,i1,i2)]; const double hDD12 = in_gfs[IDX4S(HDD12GF, i0,i1,i2)]; const double hDD22 = in_gfs[IDX4S(HDD22GF, i0,i1,i2)]; /* * NRPy+ Finite Difference Code Generation, Step 2 of 1: Evaluate SymPy expressions and write to main memory: */ in_gfs[IDX4S(HDD00GF, i0, i1, i2)] = cbrt(fabs(((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))/(2*((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD01*hDD02*hDD12 - ((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*((hDD12)*(hDD12))*(hDD00 + 1) - ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*((hDD02)*(hDD02))*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0))) - ((f0_of_xx0)*(f0_of_xx0))*((hDD01)*(hDD01))*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))) + (hDD00 + 1)*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0)))*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))))*(hDD00 + 1) - 1; in_gfs[IDX4S(HDD01GF, i0, i1, i2)] = 
hDD01*cbrt(fabs(((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))/(2*((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD01*hDD02*hDD12 - ((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*((hDD12)*(hDD12))*(hDD00 + 1) - ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*((hDD02)*(hDD02))*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0))) - ((f0_of_xx0)*(f0_of_xx0))*((hDD01)*(hDD01))*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))) + (hDD00 + 1)*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0)))*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))))); in_gfs[IDX4S(HDD02GF, i0, i1, i2)] = hDD02*cbrt(fabs(((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))/(2*((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD01*hDD02*hDD12 - ((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*((hDD12)*(hDD12))*(hDD00 + 1) - ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*((hDD02)*(hDD02))*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0))) - ((f0_of_xx0)*(f0_of_xx0))*((hDD01)*(hDD01))*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))) + (hDD00 + 1)*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0)))*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))))); in_gfs[IDX4S(HDD11GF, i0, i1, i2)] = cbrt(fabs(((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))/(2*((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD01*hDD02*hDD12 - ((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*((hDD12)*(hDD12))*(hDD00 + 1) - 
((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*((hDD02)*(hDD02))*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0))) - ((f0_of_xx0)*(f0_of_xx0))*((hDD01)*(hDD01))*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))) + (hDD00 + 1)*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0)))*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))))*(hDD11 + 1) - 1; in_gfs[IDX4S(HDD12GF, i0, i1, i2)] = hDD12*cbrt(fabs(((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))/(2*((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD01*hDD02*hDD12 - ((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*((hDD12)*(hDD12))*(hDD00 + 1) - ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*((hDD02)*(hDD02))*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0))) - ((f0_of_xx0)*(f0_of_xx0))*((hDD01)*(hDD01))*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))) + (hDD00 + 1)*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0)))*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))))); in_gfs[IDX4S(HDD22GF, i0, i1, i2)] = cbrt(fabs(((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))/(2*((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD01*hDD02*hDD12 - ((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*((hDD12)*(hDD12))*(hDD00 + 1) - ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*((hDD02)*(hDD02))*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0))) - ((f0_of_xx0)*(f0_of_xx0))*((hDD01)*(hDD01))*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))) + (hDD00 + 1)*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + 
((f0_of_xx0)*(f0_of_xx0)))*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))))*(hDD22 + 1) - 1; } // END LOOP: for(int i0=0; i0<Nxx_plus_2NGHOSTS0; i0++) } // END LOOP: for(int i1=0; i1<Nxx_plus_2NGHOSTS1; i1++) } // END LOOP: for(int i2=0; i2<Nxx_plus_2NGHOSTS2; i2++) }
DRB013-nowait-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
This example is extracted from a paper:
Ma etc. Symbolic Analysis of Concurrency Errors in OpenMP Programs, ICPP 2013
Some threads may finish the for loop early and execute errors = dt[9]+1 while another thread may still be simultaneously executing the for worksharing region by writing to d[9], causing data races. 
Data race pair: a[i]@72:7 vs. a[9]@75:13.

NOTE(review): the description above refers to a "nowait" race, but in the
code below both loops are separate "parallel for" regions, each ending in an
implicit barrier, and a[9] is read only after the second region completes.
Confirm this matches the intended DRB013 variant. Either way this is a
DataRaceBench kernel: its behavior must be preserved exactly, so do NOT
"fix" or restructure the parallel regions.
*/
#include <stdio.h>
#include <omp.h>

/* Benchmark entry point: initializes a[], rescales it in parallel, then
 * reads back a single element. */
int main()
{
  int i;
  int error;
  int len = 1000;
  /* C99 variable-length array: a[] lives on the stack. */
  int a[len];
  int b = 5;

  /* First worksharing loop: a[i] = i. */
#pragma omp parallel for private (i)
  for (i = 0; i <= len - 1; i += 1) {
    a[i] = i;
  }

  {
    /* Second worksharing loop: a[i] = b + 5*a[i]. len and b are copied
     * into each thread via firstprivate. */
#pragma omp parallel for private (i) firstprivate (len,b)
    for (i = 0; i <= len - 1; i += 1) {
      a[i] = b + a[i] * 5;
    }
  }

  /* Read one element back (5 + 9*5 + 1 = 51 in a race-free execution). */
  error = a[9] + 1;
  printf("error = %d\n",error);
  return 0;
}
templatemath.h
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ /* * templatemath.h * * Created on: Jan 1, 2016 * Author: agibsonccc */ #ifndef TEMPLATEMATH_H_ #define TEMPLATEMATH_H_ #include <dll.h> #include <pointercast.h> #include <platformmath.h> #define BFLOAT16_MAX_VALUE 32737. #define HALF_MAX_VALUE 65504. #define FLOAT_MAX_VALUE 3.4028235E38 #define DOUBLE_MAX_VALUE 1.7976931348623157E308 #define FLOAT_MIN_NORMAL 1.17549435e-38 #ifndef M_E #define M_E 2.718281828459 #endif namespace nd4j { #ifdef __CUDACC__ #endif namespace math { template<typename T> math_def inline T nd4j_abs(T value); template<typename T> math_def inline void nd4j_swap(T &val1, T &val2); template<typename T> math_def inline T nd4j_max(T val1, T val2); template<typename T> math_def inline T nd4j_min(T val1, T val2); template <typename T> math_def inline bool nd4j_eq(T val1, T val2, double eps); template<typename T, typename Z> math_def inline Z nd4j_re(T val1, T val2); template<typename T, typename Z> math_def inline Z nd4j_rint(T val1); template<typename T, typename Z> math_def inline Z nd4j_copysign(T val1, T val2); //#ifndef __CUDACC__ template<typename X, typename Y, typename Z> math_def inline Z nd4j_dot(X *x, Y *y, int length); //#endif template<typename T, typename Z> math_def inline Z nd4j_ceil(T val1); 
template<typename T> math_def inline bool nd4j_isnan(T val1); template<typename T> math_def inline bool nd4j_isinf(T val1); template<typename T> math_def inline bool nd4j_isfin(T val1); template<typename T, typename Z> math_def inline Z nd4j_cos(T val); template<typename T, typename Z> math_def inline Z nd4j_cosh(T val); template<typename X, typename Z> math_def inline Z nd4j_exp(X val); template<typename T, typename Z> math_def inline Z nd4j_floor(T val); template<typename X, typename Z> math_def inline Z nd4j_log(X val); template<typename X, typename Y, typename Z> math_def inline Z nd4j_pow(X val, Y val2); template<typename T, typename Z> math_def inline Z nd4j_round(T val); template<typename X, typename Y, typename Z> math_def inline Z nd4j_remainder(X num, Y denom); template<typename X, typename Y, typename Z> math_def inline Z nd4j_fmod(X num, Y denom); template<typename T, typename Z> math_def inline Z nd4j_erf(T num); template<typename T, typename Z> math_def inline Z nd4j_erfc(T num); template<typename T, typename Z> math_def inline Z nd4j_sigmoid(T val) { return (Z) 1.0f / ((Z) 1.0f + nd4j_exp<T, Z>(-val)); } template<typename T, typename Z> math_def inline Z nd4j_elu(T val) { if (val >= (T) 0.f) return val; else return nd4j_exp<T, Z>(val) - (Z) 1.0f; //return val >= 0.0 ? val : (nd4j_exp<T>(val) - 1.0); } template<typename T, typename Z> math_def inline Z nd4j_leakyrelu(T val,T alpha) { if (val < (T) 0.0f) return alpha * val; else return val; } template<typename T, typename Z> math_def inline Z nd4j_eluderivative(T val) { if (val >= (T) 0.0f) return (Z) 1.0f; else return nd4j_exp<T, Z>(val); //return val >= 0.0 ? 
1.0 : nd4j_exp(val); } template<typename T, typename Z> math_def inline Z nd4j_sin(T val); template<typename T, typename Z> math_def inline Z nd4j_sinh(T val); template<typename T, typename Z> math_def inline Z softplus(T val) { return nd4j_log<T, Z>((Z) 1.0f + nd4j_exp<T, Z>(val)); } template<typename T, typename Z> math_def inline Z nd4j_softsign(T val) { return val / ((T) 1.0f + nd4j::math::nd4j_abs<T>(val)); } template<typename X, typename Z> math_def inline Z nd4j_sqrt(X val); template<typename X, typename Z> math_def inline Z nd4j_tanh(X val); template<typename T, typename Z> math_def inline Z nd4j_tan(T val); template<typename X, typename Z> math_def inline Z nd4j_atan2(X val1, X val2); template<typename X, typename Z> math_def inline Z nd4j_atan2(X val1, X val2) { return p_atan2<Z>(static_cast<Z>(val1), static_cast<Z>(val2)); } template<typename T, typename Z> math_def inline Z nd4j_tan(T tval) { return p_tan<Z>(static_cast<Z>(tval)); } template<typename T, typename Z> math_def inline Z nd4j_tanhderivative(T val) { Z tanh = nd4j_tanh<T,Z>(val); return (Z) 1.0f - tanh * tanh; } template <typename T, typename Z> math_def inline T nd4j_sigmoidderivative(T val) { Z sigmoid = nd4j_sigmoid<T,Z>(val); return sigmoid * ((Z) 1.0f - sigmoid); } template<typename T, typename Z> math_def inline T nd4j_softsignderivative(T val) { T y = (T) 1.0f + nd4j_abs(val); return (Z) 1.0f / (y * y); } template<typename T, typename Z> math_def inline T nd4j_sgn(T val) { return val < (T) 0.0f ? (Z) -1.0f : val > (T) 0.0f ? (Z) 1.0f : (Z) 0.0f; } template<typename T, typename Z> math_def inline Z nd4j_sign(T val) { return nd4j_sgn<T, Z>(val); } template<typename T, typename Z> math_def inline Z nd4j_signum(T val) { return nd4j_sgn<T, Z>(val); } //#ifndef __CUDACC__ /* template<> math_def inline float16 nd4j_dot<float16>(float16 *x, float16 *y, int length) { float16 dot = (float16) 0.0f; // TODO: since we can't use simd on unions, we might use something else here. 
for(int e = 0; e < length; e++) { dot += x[e] * y[e]; } return dot; } */ template<typename X, typename Y, typename Z> math_def inline Z nd4j_dot(X *x, Y *y, int length) { Z dot = (Z)0.0f; //#pragma omp simd reduction(+:dot) for(int e = 0; e < length; e++) { dot += static_cast<Z>(x[e]) * static_cast<Z>(y[e]); } return dot; } //#endif template<typename T, typename Z> math_def inline Z nd4j_acos(T val); template<typename T, typename Z> math_def inline Z nd4j_acosh(T val); template<typename T, typename Z> math_def inline Z nd4j_asin(T val); template<typename T, typename Z> math_def inline Z nd4j_asinh(T val); template<typename T, typename Z> math_def inline Z nd4j_asinh(T val) { //Math.log(Math.sqrt(Math.pow(x, 2) + 1) + x) return nd4j_log<Z, Z>(nd4j_sqrt<Z, Z>(nd4j_pow<T,T,Z>(val, (T) 2) + (Z) 1.f) + (Z) val); } template<typename T, typename Z> math_def inline Z nd4j_atan(T val); template<typename T, typename Z> math_def inline Z nd4j_atanh(T val); template<> math_def inline float16 nd4j_abs<float16>(float16 value) { #ifdef NATIVE_HALFS if (value < (float16) 0.f) { return float16(__hneg(value.data)); } else return value; #else return (float16) fabsf((float) value); #endif } template<> math_def inline bfloat16 nd4j_abs<bfloat16>(bfloat16 value) { return (bfloat16) fabsf((float) value); } template<> math_def inline float nd4j_abs<float>(float value) { return fabsf(value); } template<> math_def inline double nd4j_abs<double>(double value) { return fabs(value); } template<> math_def inline int nd4j_abs<int>(int value) { return abs(value); } template<> math_def inline Nd4jLong nd4j_abs<Nd4jLong>(Nd4jLong value) { return llabs(value); } template<> math_def inline bool nd4j_abs<bool>(bool value) { return value; } template<> math_def inline uint8_t nd4j_abs<uint8_t>(uint8_t value) { return value; } template<> math_def inline uint16_t nd4j_abs<uint16_t>(uint16_t value) { return value; } template<> math_def inline uint32_t nd4j_abs<uint32_t>(uint32_t value) { return value; } 
// Absolute value for unsigned types is the identity.
template<> math_def inline Nd4jULong nd4j_abs<Nd4jULong>(Nd4jULong value) { return value; }

// Signed 8/16-bit abs via conditional negation.
// NOTE(review): -value at the minimum value (e.g. INT8_MIN) relies on the
// int-promoted result being narrowed back; confirm callers never pass it.
template<> math_def inline int8_t nd4j_abs<int8_t>(int8_t value) { return value < 0 ? -value : value; }

template<> math_def inline int16_t nd4j_abs<int16_t>(int16_t value) { return value < 0 ? -value : value; }

// half-precision NaN test by raw bit pattern.
// NOTE(review): this matches only the single encoding 0x7fff; IEEE half has
// many NaN bit patterns -- confirm upstream code only produces this one.
template<> math_def inline bool nd4j_isnan<float16>(float16 value) { return *(value.data.getXP()) == 0x7fffU; }

// bfloat16 NaN test delegates to the type's canonical NaN constant.
template<> math_def inline bool nd4j_isnan<bfloat16>(bfloat16 value) { return value == bfloat16::nan(); //0x7fffU;
}

// IEEE float/double NaN test: NaN is the only value not equal to itself.
template<> math_def inline bool nd4j_isnan<float>(float value) { return value != value; }

template<> math_def inline bool nd4j_isnan<double>(double value) { return value != value; }

// Integral and bool types can never be NaN; these overloads return false so
// generic code instantiates for every supported type parameter.
template<> math_def inline bool nd4j_isnan<int>(int value) { return false; }

template<> math_def inline bool nd4j_isnan<uint32_t>(uint32_t value) { return false; }

template<> math_def inline bool nd4j_isnan<uint16_t>(uint16_t value) { return false; }

template<> math_def inline bool nd4j_isnan<uint8_t>(uint8_t value) { return false; }

template<> math_def inline bool nd4j_isnan<int16_t>(int16_t value) { return false; }

template<> math_def inline bool nd4j_isnan<int8_t>(int8_t value) { return false; }

template<> math_def inline bool nd4j_isnan<bool>(bool value) { return false; }

template<> math_def inline bool nd4j_isnan<Nd4jLong>(Nd4jLong value) { return false; }

template<> math_def inline bool nd4j_isnan<Nd4jULong>(Nd4jULong value) { return false; }

// Reduced-precision "infinity" tests: any magnitude beyond the type's
// largest finite value is treated as infinite.
template<> math_def inline bool nd4j_isinf<float16>(float16 value) { return value < (float16) -HALF_MAX_VALUE || value > (float16) HALF_MAX_VALUE; }

template<> math_def inline bool nd4j_isinf<bfloat16>(bfloat16 value) { return value < (bfloat16) -BFLOAT16_MAX_VALUE || value > (bfloat16) BFLOAT16_MAX_VALUE; }

// float infinity test: CUDA device code uses the builtin isinf(); host code
// uses std::isinf.
template<> math_def inline bool nd4j_isinf<float>(float value) {
#ifdef __CUDACC__
    return isinf(value);
#else
    return std::isinf(value);
#endif
    //return value < -FLOAT_MAX_VALUE || value > FLOAT_MAX_VALUE;
}
template<> math_def inline bool nd4j_isinf<double>(double value) { #ifdef __CUDACC__ return isinf(value); #else return std::isinf(value); #endif //return value < -DOUBLE_MAX_VALUE || value > DOUBLE_MAX_VALUE; } template<> math_def inline bool nd4j_isinf<int>(int value) { return false; } template<> math_def inline bool nd4j_isinf<uint32_t>(uint32_t value) { return false; } template<> math_def inline bool nd4j_isinf<uint16_t>(uint16_t value) { return false; } template<> math_def inline bool nd4j_isinf<uint8_t>(uint8_t value) { return false; } template<> math_def inline bool nd4j_isinf<int16_t>(int16_t value) { return false; } template<> math_def inline bool nd4j_isinf<int8_t>(int8_t value) { return false; } template<> math_def inline bool nd4j_isinf<bool>(bool value) { return false; } template<> math_def inline bool nd4j_isinf<Nd4jLong>(Nd4jLong value) { return false; } template<> math_def inline bool nd4j_isinf<Nd4jULong>(Nd4jULong value) { return false; } template<typename T> math_def inline bool nd4j_isfin(T value) { return !nd4j_isnan<T>(value) && !nd4j_isinf<T>(value); } template<> math_def inline float16 nd4j_copysign<float16>(float16 val1, float16 val2) { return (float16) copysignf((float) val1, (float) val2); } template<> math_def inline float nd4j_copysign<float>(float val1, float val2) { return copysignf(val1, val2); } template<> math_def inline double nd4j_copysign<double>(double val1, double val2) { return copysign(val1, val2); } template<> math_def inline int nd4j_copysign<int>(int val1, int val2) { if (val2 < 0) return -(nd4j_abs<int>(val1)); else return nd4j_abs<int>(val1); } template<> math_def inline Nd4jLong nd4j_copysign<Nd4jLong>(Nd4jLong val1, Nd4jLong val2) { if (val2 < 0) return -(nd4j_abs<Nd4jLong>(val1)); else return nd4j_abs<Nd4jLong>(val1); } template<> math_def inline bool nd4j_max(bool val1, bool val2) { return (val1 || val2) ? true : false; } template<typename T> math_def inline T nd4j_max(T val1, T val2) { return val1 > val2 ? 
val1 : val2; } template<> math_def inline bool nd4j_min(bool val1, bool val2) { return (val1 && val2) ? true : false; } template<typename T> math_def inline T nd4j_min(T val1, T val2) { return val1 < val2 ? val1 : val2; } template <typename T> math_def inline bool nd4j_eq(T d1, T d2, double eps) { if (nd4j::math::nd4j_isinf<T>(d1) && nd4j::math::nd4j_isinf<T>(d2)) { if (d1 > 0 && d2 > 0) return true; else if (d1 < 0 && d2 < 0) return true; else return false; } auto diff = static_cast<double>(nd4j::math::nd4j_abs<T>(d1 - d2)); // works well except in the range of very large numbers if (diff <= eps) return true; // Knuth approach // works well except in the range of very small numbers if (diff <= nd4j::math::nd4j_max<double>(nd4j::math::nd4j_abs<double>(static_cast<double>(d1)), nd4j::math::nd4j_abs<double>(static_cast<double>(d2))) * eps) return true; return false; } template <typename X, typename Z> math_def inline Z nd4j_ceil(X val) { return static_cast<Z>(p_ceil<X>(val)); } template <typename X, typename Z> math_def inline Z nd4j_round(X val) { return static_cast<Z>(p_round<X>(val)); } template <typename X, typename Z> math_def inline Z nd4j_asin(X val) { return p_asin<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_atan(X val) { return p_atan<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_atanh(X val) { return p_atanh<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_cosh(X val) { return p_cosh<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_rint(X val) { return p_rint<X>(val); } template <typename X, typename Z> math_def inline Z nd4j_sinh(X val) { return p_sinh<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_acos(X val) { return p_acos<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_acosh(X val) { return p_acosh<Z>(static_cast<Z>(val)); } template 
<typename X, typename Z> math_def inline Z nd4j_cos(X val) { return p_cos<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_exp(X val) { return p_exp<X>(val); } template<typename X, typename Z> math_def inline Z nd4j_floor(X val) { return static_cast<Z>(p_floor<X>(val)); } template<typename X, typename Z> math_def inline Z nd4j_log(X val) { return static_cast<Z>(p_log<X>(val)); } /** * This func is special case - it must return floating point value, and optionally Y arg can be floating point argument * @tparam X * @tparam Y * @tparam Z * @param val * @param val2 * @return */ template <typename X, typename Y, typename Z> math_def inline Z nd4j_pow(X val, Y val2) { return p_pow<Z>(static_cast<Z>(val), static_cast<Z>(val2)); } template<typename T> math_def inline T nd4j_re(T val1, T val2) { if (val1 == (T) 0.0f && val2 == (T) 0.0f) return (T) 0.0f; return nd4j_abs<T>(val1 - val2) / (nd4j_abs<T>(val1) + nd4j_abs<T>(val2)); } template <typename X, typename Y, typename Z> math_def inline Z nd4j_remainder(X val, Y val2) { return p_remainder<Z>(static_cast<Z>(val), static_cast<Z>(val2)); } template <typename X, typename Y, typename Z> math_def inline Z nd4j_fmod(X val, Y val2) { return p_fmod<Z>(static_cast<Z>(val), static_cast<Z>(val2)); } template <typename X, typename Z> math_def inline Z nd4j_sin(X val) { return p_sin<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_sqrt(X val) { return p_sqrt<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_tanh(X val) { return p_tanh<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_erf(X val) { return p_erf<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_erfc(X val) { return p_erfc<Z>(static_cast<Z>(val)); } template<typename T> math_def inline void nd4j_swap(T &val1, T &val2) { T temp = val1; val1=val2; val2=temp; }; #ifdef __CUDACC__ namespace atomics { 
template <typename T> inline __device__ T nd4j_atomicAdd(T* address, T val); template <typename T> inline __device__ T nd4j_atomicSub(T* address, T val); template <typename T> inline __device__ T nd4j_atomicMul(T* address, T val); template <typename T> inline __device__ T nd4j_atomicDiv(T* address, T val); template <> inline __device__ double nd4j_atomicAdd<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int *) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <> inline __device__ float16 nd4j_atomicAdd<float16>(float16* address, float16 val) { int* address_as_ull = (int*) address; long addr = (long) address; bool misaligned = addr & 0x3; if (misaligned) address_as_ull = (int *) (addr - 2); PAIR old, assumed, fresh; old.W = *address_as_ull; do { if (!misaligned) { float16 res = ((float16) old.B.H) + val; fresh.B.H = res.data; fresh.B.L = old.B.L; } else { float16 res = ((float16) old.B.L) + val; fresh.B.L = res.data; fresh.B.H = old.B.H; } assumed.W = old.W; old.W = atomicCAS(address_as_ull, assumed.W, fresh.W); } while (assumed.W != old.W); if (!misaligned) return old.B.H; else return old.B.L; } template <> inline __device__ bfloat16 nd4j_atomicAdd<bfloat16>(bfloat16* address, bfloat16 val) { int* address_as_ull = (int*) address; long addr = (long)(address); bool misaligned = addr & 0x3; if (misaligned) address_as_ull = (int *) (addr - 2); BPAIR old, assumed, fresh; old.W = *address_as_ull; do { if (!misaligned) { bfloat16 res = old.B.H + val; fresh.B.H = res; fresh.B.L = old.B.L; } else { bfloat16 res = old.B.L + val; fresh.B.L = res; fresh.B.H = old.B.H; } assumed.W = old.W; old.W = atomicCAS(address_as_ull, assumed.W, fresh.W); } while (assumed.W != old.W); if (!misaligned) return old.B.H; else return 
old.B.L; } template <> inline __device__ double nd4j_atomicSub<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int *) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val - __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <> inline __device__ double nd4j_atomicMul<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val * __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <> inline __device__ double nd4j_atomicDiv<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val / __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <> inline __device__ float nd4j_atomicAdd<float>(float* address, float val) { return atomicAdd(address,val); } template <> inline __device__ float nd4j_atomicSub<float>(float* address, float val) { int* address_as_ull = (int*) address; int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __float_as_int(val - __float_as_int(assumed))); } while (assumed != old); return __int_as_float(old); } template <> inline __device__ float nd4j_atomicMul<float>(float* address, float val) { int* address_as_ull = ( int*)address; int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __float_as_int(val * __float_as_int(assumed))); } while (assumed != old); return __int_as_float(old); } 
template <> inline __device__ float nd4j_atomicDiv<float>(float* address, float val) { int* address_as_ull = (int*)address; int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __float_as_int(val * __float_as_int(assumed))); } while (assumed != old); return __int_as_float(old); } } #endif } } #endif /* TEMPLATEMATH_H_ */
convolution_3x3_pack8to1_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if !(__AVX512VNNI__ || __AVXVNNI__ || __AVX2__ || __XOP__) #if NCNN_RUNTIME_CPU && NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__ void conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_avx512vnni(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt); void conv3x3s1_winograd42_pack8to1_int8_sse_avx512vnni(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt); #endif #if NCNN_RUNTIME_CPU && NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__ void conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_avxvnni(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt); void conv3x3s1_winograd42_pack8to1_int8_sse_avxvnni(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt); #endif #if NCNN_RUNTIME_CPU && NCNN_AVX2 && __AVX__ && !__AVX2__ void conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_avx2(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt); void conv3x3s1_winograd42_pack8to1_int8_sse_avx2(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt); #endif #if NCNN_RUNTIME_CPU && NCNN_XOP && __SSE2__ && !__XOP__ void conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_xop(const Mat& kernel, Mat& 
kernel_tm_pack8to1, int inch, int outch, const Option& opt); void conv3x3s1_winograd42_pack8to1_int8_sse_xop(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt); #endif #endif static void conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt) { #if !(__AVX512VNNI__ || __AVXVNNI__ || __AVX2__ || __XOP__) #if NCNN_RUNTIME_CPU && NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__ if (ncnn::cpu_support_x86_avx512_vnni()) { conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_avx512vnni(kernel, kernel_tm_pack8to1, inch, outch, opt); return; } #endif #if NCNN_RUNTIME_CPU && NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__ if (ncnn::cpu_support_x86_avx_vnni()) { conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_avxvnni(kernel, kernel_tm_pack8to1, inch, outch, opt); return; } #endif #if NCNN_RUNTIME_CPU && NCNN_AVX2 && __AVX__ && !__AVX2__ if (ncnn::cpu_support_x86_avx2()) { conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_avx2(kernel, kernel_tm_pack8to1, inch, outch, opt); return; } #endif #if NCNN_RUNTIME_CPU && NCNN_XOP && __SSE2__ && !__XOP__ if (ncnn::cpu_support_x86_xop()) { conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_xop(kernel, kernel_tm_pack8to1, inch, outch, opt); return; } #endif #endif // winograd42 transform kernel Mat kernel_tm(6 * 6, inch, outch, (size_t)2u); const short ktm[6][3] = { {6, 0, 0}, {-4, -4, -4}, {-4, 4, -4}, {1, 2, 4}, {1, -2, 4}, {0, 0, 6} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9; short* kernel_tm0 = kernel_tm.channel(p).row<short>(q); // transform kernel const signed char* k0 = kernel0; const signed char* k1 = kernel0 + 3; const signed char* k2 = kernel0 + 6; // h short tmp[6][3]; for (int i = 0; i < 6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * 
ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 6; j++) { short* tmpp = &tmp[j][0]; for (int i = 0; i < 6; i++) { kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 36-inch-outch // dst = 4b-8a-inch/8a-36-outch/4b kernel_tm_pack8to1.create(8 * inch / 8, 36, outch / 4 + outch % 4, (size_t)2u * 4, 4); int p = 0; for (; p + 3 < outch; p += 4) { const Mat k0 = kernel_tm.channel(p); const Mat k1 = kernel_tm.channel(p + 1); const Mat k2 = kernel_tm.channel(p + 2); const Mat k3 = kernel_tm.channel(p + 3); Mat g0 = kernel_tm_pack8to1.channel(p / 4); for (int k = 0; k < 36; k++) { short* g00 = g0.row<short>(k); for (int q = 0; q + 7 < inch; q += 8) { #if __AVXVNNI__ || __AVX512VNNI__ || __XOP__ for (int i = 0; i < 4; i++) { const short* k00 = k0.row<const short>(q + i * 2); const short* k10 = k1.row<const short>(q + i * 2); const short* k20 = k2.row<const short>(q + i * 2); const short* k30 = k3.row<const short>(q + i * 2); const short* k01 = k0.row<const short>(q + i * 2 + 1); const short* k11 = k1.row<const short>(q + i * 2 + 1); const short* k21 = k2.row<const short>(q + i * 2 + 1); const short* k31 = k3.row<const short>(q + i * 2 + 1); g00[0] = k00[k]; g00[1] = k01[k]; g00[2] = k10[k]; g00[3] = k11[k]; g00[4] = k20[k]; g00[5] = k21[k]; g00[6] = k30[k]; g00[7] = k31[k]; g00 += 8; } #else for (int i = 0; i < 8; i++) { g00[0] = k0.row<const short>(q + i)[k]; g00[1] = k1.row<const short>(q + i)[k]; g00[2] = k2.row<const short>(q + i)[k]; g00[3] = k3.row<const short>(q + i)[k]; g00 += 4; } #endif } } } for (; p < outch; p++) { const Mat k0 = kernel_tm.channel(p); Mat g0 = kernel_tm_pack8to1.channel(p / 4 + p % 4); for (int k = 0; k < 36; k++) { short* g00 = g0.row<short>(k); for (int q = 0; q + 7 < inch; q += 8) { for (int i = 0; i < 8; i++) { g00[0] = 
k0.row<const short>(q + i)[k]; g00 += 1; } } } } } static void conv3x3s1_winograd42_pack8to1_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt) { #if !(__AVX512VNNI__ || __AVXVNNI__ || __AVX2__ || __XOP__) #if NCNN_RUNTIME_CPU && NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__ if (ncnn::cpu_support_x86_avx512_vnni()) { conv3x3s1_winograd42_pack8to1_int8_sse_avx512vnni(bottom_blob, top_blob, kernel_tm, opt); return; } #endif #if NCNN_RUNTIME_CPU && NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__ if (ncnn::cpu_support_x86_avx_vnni()) { conv3x3s1_winograd42_pack8to1_int8_sse_avxvnni(bottom_blob, top_blob, kernel_tm, opt); return; } #endif #if NCNN_RUNTIME_CPU && NCNN_AVX2 && __AVX__ && !__AVX2__ if (ncnn::cpu_support_x86_avx2()) { conv3x3s1_winograd42_pack8to1_int8_sse_avx2(bottom_blob, top_blob, kernel_tm, opt); return; } #endif #if NCNN_RUNTIME_CPU && NCNN_XOP && __SSE2__ && !__XOP__ if (ncnn::cpu_support_x86_xop()) { conv3x3s1_winograd42_pack8to1_int8_sse_xop(bottom_blob, top_blob, kernel_tm, opt); return; } #endif #endif int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; // size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 4n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator); // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, 
-1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r04 + r03 // 2 = 4 * (r01 - r02) + r04 - r03 // 3 = -2 * (r01 - r03) + r04 - r02 // 4 = 2 * (r01 - r03) + r04 - r02 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); short tmp[6][6][8]; // tile for (int i = 0; i < h_tm / 6; i++) { for (int j = 0; j < w_tm / 6; j++) { const signed char* r0 = img0.row<const signed char>(i * 4) + (j * 4) * 8; for (int m = 0; m < 6; m++) { // TODO use _mm_cvtepi8_epi16 on sse4.1 __m128i _r00_01 = _mm_loadu_si128((const __m128i*)r0); __m128i _r02_03 = _mm_loadu_si128((const __m128i*)(r0 + 16)); __m128i _r04_05 = _mm_loadu_si128((const __m128i*)(r0 + 32)); __m128i _extr0001 = _mm_cmpgt_epi8(_mm_setzero_si128(), _r00_01); __m128i _extr0203 = _mm_cmpgt_epi8(_mm_setzero_si128(), _r02_03); __m128i _extr0405 = _mm_cmpgt_epi8(_mm_setzero_si128(), _r04_05); __m128i _r00 = _mm_unpacklo_epi8(_r00_01, _extr0001); __m128i _r01 = _mm_unpackhi_epi8(_r00_01, _extr0001); __m128i _r02 = _mm_unpacklo_epi8(_r02_03, _extr0203); __m128i _r03 = _mm_unpackhi_epi8(_r02_03, _extr0203); __m128i _r04 = _mm_unpacklo_epi8(_r04_05, _extr0405); __m128i _r05 = _mm_unpackhi_epi8(_r04_05, _extr0405); __m128i _v5 = _mm_set1_epi16(5); __m128i _tmp0m = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_r00, 2), _r04), _mm_mullo_epi16(_r02, _v5)); __m128i _tmp1m = _mm_sub_epi16(_mm_add_epi16(_r04, _r03), _mm_slli_epi16(_mm_add_epi16(_r01, _r02), 2)); __m128i _tmp2m = _mm_add_epi16(_mm_sub_epi16(_r04, _r03), _mm_slli_epi16(_mm_sub_epi16(_r01, _r02), 2)); __m128i _tmp3m = _mm_sub_epi16(_mm_sub_epi16(_r04, _r02), _mm_slli_epi16(_mm_sub_epi16(_r01, _r03), 1)); __m128i _tmp4m = _mm_add_epi16(_mm_sub_epi16(_r04, _r02), _mm_slli_epi16(_mm_sub_epi16(_r01, _r03), 1)); __m128i _tmp5m = 
_mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_r01, 2), _r05), _mm_mullo_epi16(_r03, _v5)); _mm_storeu_si128((__m128i*)tmp[0][m], _tmp0m); _mm_storeu_si128((__m128i*)tmp[1][m], _tmp1m); _mm_storeu_si128((__m128i*)tmp[2][m], _tmp2m); _mm_storeu_si128((__m128i*)tmp[3][m], _tmp3m); _mm_storeu_si128((__m128i*)tmp[4][m], _tmp4m); _mm_storeu_si128((__m128i*)tmp[5][m], _tmp5m); r0 += w * 8; } short* r0_tm_0 = (short*)img0_tm + (i * w_tm / 6 + j) * 8; short* r0_tm_1 = r0_tm_0 + tiles * 8; short* r0_tm_2 = r0_tm_0 + tiles * 16; short* r0_tm_3 = r0_tm_0 + tiles * 24; short* r0_tm_4 = r0_tm_0 + tiles * 32; short* r0_tm_5 = r0_tm_0 + tiles * 40; for (int m = 0; m < 6; m++) { __m128i _tmp00 = _mm_loadu_si128((const __m128i*)tmp[m][0]); __m128i _tmp01 = _mm_loadu_si128((const __m128i*)tmp[m][1]); __m128i _tmp02 = _mm_loadu_si128((const __m128i*)tmp[m][2]); __m128i _tmp03 = _mm_loadu_si128((const __m128i*)tmp[m][3]); __m128i _tmp04 = _mm_loadu_si128((const __m128i*)tmp[m][4]); __m128i _tmp05 = _mm_loadu_si128((const __m128i*)tmp[m][5]); __m128i _v5 = _mm_set1_epi16(5); __m128i _r0tm0 = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_tmp00, 2), _tmp04), _mm_mullo_epi16(_tmp02, _v5)); __m128i _r0tm1 = _mm_sub_epi16(_mm_add_epi16(_tmp04, _tmp03), _mm_slli_epi16(_mm_add_epi16(_tmp01, _tmp02), 2)); __m128i _r0tm2 = _mm_add_epi16(_mm_sub_epi16(_tmp04, _tmp03), _mm_slli_epi16(_mm_sub_epi16(_tmp01, _tmp02), 2)); __m128i _r0tm3 = _mm_sub_epi16(_mm_sub_epi16(_tmp04, _tmp02), _mm_slli_epi16(_mm_sub_epi16(_tmp01, _tmp03), 1)); __m128i _r0tm4 = _mm_add_epi16(_mm_sub_epi16(_tmp04, _tmp02), _mm_slli_epi16(_mm_sub_epi16(_tmp01, _tmp03), 1)); __m128i _r0tm5 = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_tmp01, 2), _tmp05), _mm_mullo_epi16(_tmp03, _v5)); _mm_storeu_si128((__m128i*)r0_tm_0, _r0tm0); _mm_storeu_si128((__m128i*)r0_tm_1, _r0tm1); _mm_storeu_si128((__m128i*)r0_tm_2, _r0tm2); _mm_storeu_si128((__m128i*)r0_tm_3, _r0tm3); _mm_storeu_si128((__m128i*)r0_tm_4, _r0tm4); 
_mm_storeu_si128((__m128i*)r0_tm_5, _r0tm5); r0_tm_0 += tiles * 48; r0_tm_1 += tiles * 48; r0_tm_2 += tiles * 48; r0_tm_3 += tiles * 48; r0_tm_4 += tiles * 48; r0_tm_5 += tiles * 48; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = h_tm / 6 * w_tm / 6; // permute // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; #if __AVX2__ if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator); #else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 36; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; #if __AVX2__ for (; i + 3 < tiles; i += 4) { short* tmpptr = tm2.row<short>(i / 4); const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m256i _r0 = _mm256_loadu_si256((const __m256i*)r0); __m256i _r1 = _mm256_loadu_si256((const __m256i*)(r0 + 16)); _mm256_storeu_si256((__m256i*)tmpptr, _r0); _mm256_storeu_si256((__m256i*)(tmpptr + 16), _r1); r0 += bottom_blob_tm.cstep * 8; tmpptr += 32; } } #endif for (; i + 1 < tiles; i += 2) { #if __AVX2__ short* tmpptr = tm2.row<short>(i / 4 + (i % 4) / 2); #else short* tmpptr = tm2.row<short>(i / 2); #endif const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m128i _r0 = 
_mm_loadu_si128((const __m128i*)r0); __m128i _r1 = _mm_loadu_si128((const __m128i*)(r0 + 8)); _mm_storeu_si128((__m128i*)tmpptr, _r0); _mm_storeu_si128((__m128i*)(tmpptr + 8), _r1); r0 += bottom_blob_tm.cstep * 8; tmpptr += 16; } } for (; i < tiles; i++) { #if __AVX2__ short* tmpptr = tm2.row<short>(i / 4 + (i % 4) / 2 + i % 2); #else short* tmpptr = tm2.row<short>(i / 2 + i % 2); #endif const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m128i _r0 = _mm_loadu_si128((const __m128i*)r0); _mm_storeu_si128((__m128i*)tmpptr, _r0); r0 += bottom_blob_tm.cstep * 8; tmpptr += 8; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 36, outch, 4u, 1, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p + 1); int* output2_tm = top_blob_tm.channel(p + 2); int* output3_tm = top_blob_tm.channel(p + 3); const Mat kernel0_tm = kernel_tm.channel(p / 4); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __AVX2__ for (; i + 3 < tiles; i += 4) { const short* r0 = bb2.row<const short>(i / 4); const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 __m256i _sum0_1 = _mm256_setzero_si256(); __m256i _sum2_3 = _mm256_setzero_si256(); __m256i _sum4_5 = _mm256_setzero_si256(); __m256i _sum6_7 = _mm256_setzero_si256(); for (int j = 0; j < nn; j++) { // 0 1 2 3 4 5 6 7 8 9 a b c d e f __m256i _val0 = _mm256_loadu_si256((const __m256i*)r0); __m256i _w01 = _mm256_loadu_si256((const __m256i*)k0); __m256i _w23 = _mm256_loadu_si256((const __m256i*)(k0 + 16)); #if __AVXVNNI__ || __AVX512VNNI__ __m256i _val0_0123 = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0)); __m256i _val0_4567 = 
_mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2)); __m256i _val0_89ab = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(5, 5, 5, 5, 4, 4, 4, 4)); __m256i _val0_cdef = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(7, 7, 7, 7, 6, 6, 6, 6)); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w01, _val0_0123); _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w01, _val0_89ab); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w23, _val0_4567); _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w23, _val0_cdef); #else // 0 0 1 1 2 2 3 3 8 8 9 9 a a b b // 4 4 5 5 6 6 7 7 c c d d e e f f __m256i _val0_0123_89ab = _mm256_unpacklo_epi16(_val0, _val0); __m256i _val0_4567_cdef = _mm256_unpackhi_epi16(_val0, _val0); __m256i _val0_0123 = _mm256_permutevar8x32_epi32(_val0_0123_89ab, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val0_4567 = _mm256_permutevar8x32_epi32(_val0_4567_cdef, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val0_89ab = _mm256_permutevar8x32_epi32(_val0_0123_89ab, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _val0_cdef = _mm256_permutevar8x32_epi32(_val0_4567_cdef, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _sl00_01 = _mm256_mullo_epi16(_w01, _val0_0123); __m256i _sh00_01 = _mm256_mulhi_epi16(_w01, _val0_0123); __m256i _sl10_11 = _mm256_mullo_epi16(_w01, _val0_89ab); __m256i _sh10_11 = _mm256_mulhi_epi16(_w01, _val0_89ab); __m256i _sl02_03 = _mm256_mullo_epi16(_w23, _val0_4567); __m256i _sh02_03 = _mm256_mulhi_epi16(_w23, _val0_4567); __m256i _sl12_13 = _mm256_mullo_epi16(_w23, _val0_cdef); __m256i _sh12_13 = _mm256_mulhi_epi16(_w23, _val0_cdef); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl00_01, _sh00_01)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl10_11, _sh10_11)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl02_03, _sh02_03)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl12_13, _sh12_13)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl00_01, 
_sh00_01)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl10_11, _sh10_11)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl02_03, _sh02_03)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl12_13, _sh12_13)); #endif __m256i _val1 = _mm256_loadu_si256((const __m256i*)(r0 + 16)); #if __AVXVNNI__ || __AVX512VNNI__ __m256i _val1_0123 = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0)); __m256i _val1_4567 = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2)); __m256i _val1_89ab = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(5, 5, 5, 5, 4, 4, 4, 4)); __m256i _val1_cdef = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(7, 7, 7, 7, 6, 6, 6, 6)); _sum4_5 = _mm256_dpwssd_epi32(_sum4_5, _w01, _val1_0123); _sum6_7 = _mm256_dpwssd_epi32(_sum6_7, _w01, _val1_89ab); _sum4_5 = _mm256_dpwssd_epi32(_sum4_5, _w23, _val1_4567); _sum6_7 = _mm256_dpwssd_epi32(_sum6_7, _w23, _val1_cdef); #else __m256i _val1_0123_89ab = _mm256_unpacklo_epi16(_val1, _val1); __m256i _val1_4567_cdef = _mm256_unpackhi_epi16(_val1, _val1); __m256i _val1_0123 = _mm256_permutevar8x32_epi32(_val1_0123_89ab, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val1_4567 = _mm256_permutevar8x32_epi32(_val1_4567_cdef, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val1_89ab = _mm256_permutevar8x32_epi32(_val1_0123_89ab, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _val1_cdef = _mm256_permutevar8x32_epi32(_val1_4567_cdef, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _sl04_05 = _mm256_mullo_epi16(_w01, _val1_0123); __m256i _sh04_05 = _mm256_mulhi_epi16(_w01, _val1_0123); __m256i _sl14_15 = _mm256_mullo_epi16(_w01, _val1_89ab); __m256i _sh14_15 = _mm256_mulhi_epi16(_w01, _val1_89ab); __m256i _sl06_07 = _mm256_mullo_epi16(_w23, _val1_4567); __m256i _sh06_07 = _mm256_mulhi_epi16(_w23, _val1_4567); __m256i _sl16_17 = _mm256_mullo_epi16(_w23, _val1_cdef); __m256i _sh16_17 = _mm256_mulhi_epi16(_w23, 
_val1_cdef); _sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpacklo_epi16(_sl04_05, _sh04_05)); _sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpacklo_epi16(_sl14_15, _sh14_15)); _sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpacklo_epi16(_sl06_07, _sh06_07)); _sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpacklo_epi16(_sl16_17, _sh16_17)); _sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpackhi_epi16(_sl04_05, _sh04_05)); _sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpackhi_epi16(_sl14_15, _sh14_15)); _sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpackhi_epi16(_sl06_07, _sh06_07)); _sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpackhi_epi16(_sl16_17, _sh16_17)); #endif r0 += 32; k0 += 32; } __m256i _sum0_2 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 2, 0, 0)); __m256i _sum1_3 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 3, 0, 1)); _sum0_2 = _mm256_add_epi32(_sum0_2, _sum1_3); __m256i _sum4_6 = _mm256_permute2x128_si256(_sum4_5, _sum6_7, _MM_SHUFFLE(0, 2, 0, 0)); __m256i _sum5_7 = _mm256_permute2x128_si256(_sum4_5, _sum6_7, _MM_SHUFFLE(0, 3, 0, 1)); _sum4_6 = _mm256_add_epi32(_sum4_6, _sum5_7); int sum[16]; _mm256_storeu_si256((__m256i*)sum, _sum0_2); _mm256_storeu_si256((__m256i*)(sum + 8), _sum4_6); output0_tm[0] = sum[0]; output1_tm[0] = sum[1]; output2_tm[0] = sum[2]; output3_tm[0] = sum[3]; output0_tm[1] = sum[4]; output1_tm[1] = sum[5]; output2_tm[1] = sum[6]; output3_tm[1] = sum[7]; output0_tm[2] = sum[8]; output1_tm[2] = sum[9]; output2_tm[2] = sum[10]; output3_tm[2] = sum[11]; output0_tm[3] = sum[12]; output1_tm[3] = sum[13]; output2_tm[3] = sum[14]; output3_tm[3] = sum[15]; output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; } #endif for (; i + 1 < tiles; i += 2) { #if __AVX2__ const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2); #else const short* r0 = bb2.row<const short>(i / 2); #endif const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 #if __AVX2__ __m256i _sum0_1 = 
_mm256_setzero_si256(); __m256i _sum2_3 = _mm256_setzero_si256(); #else __m128i _sum0 = _mm_setzero_si128(); __m128i _sum1 = _mm_setzero_si128(); __m128i _sum2 = _mm_setzero_si128(); __m128i _sum3 = _mm_setzero_si128(); #endif for (int j = 0; j < nn; j++) { #if __AVX2__ // 0 1 2 3 4 5 6 7 8 9 a b c d e f __m256i _val = _mm256_loadu_si256((const __m256i*)r0); __m256i _w01 = _mm256_loadu_si256((const __m256i*)k0); __m256i _w23 = _mm256_loadu_si256((const __m256i*)(k0 + 16)); #if __AVXVNNI__ || __AVX512VNNI__ __m256i _val_0123 = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0)); __m256i _val_4567 = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2)); __m256i _val_89ab = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(5, 5, 5, 5, 4, 4, 4, 4)); __m256i _val_cdef = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(7, 7, 7, 7, 6, 6, 6, 6)); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w01, _val_0123); _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w01, _val_89ab); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w23, _val_4567); _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w23, _val_cdef); #else __m256i _val_0123_89ab = _mm256_unpacklo_epi16(_val, _val); __m256i _val_4567_cdef = _mm256_unpackhi_epi16(_val, _val); __m256i _val_0123 = _mm256_permutevar8x32_epi32(_val_0123_89ab, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val_4567 = _mm256_permutevar8x32_epi32(_val_4567_cdef, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val_89ab = _mm256_permutevar8x32_epi32(_val_0123_89ab, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _val_cdef = _mm256_permutevar8x32_epi32(_val_4567_cdef, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _sl00_01 = _mm256_mullo_epi16(_w01, _val_0123); __m256i _sh00_01 = _mm256_mulhi_epi16(_w01, _val_0123); __m256i _sl10_11 = _mm256_mullo_epi16(_w01, _val_89ab); __m256i _sh10_11 = _mm256_mulhi_epi16(_w01, _val_89ab); __m256i _sl02_03 = _mm256_mullo_epi16(_w23, _val_4567); __m256i _sh02_03 = 
_mm256_mulhi_epi16(_w23, _val_4567); __m256i _sl12_13 = _mm256_mullo_epi16(_w23, _val_cdef); __m256i _sh12_13 = _mm256_mulhi_epi16(_w23, _val_cdef); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl00_01, _sh00_01)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl10_11, _sh10_11)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl02_03, _sh02_03)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl12_13, _sh12_13)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl00_01, _sh00_01)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl10_11, _sh10_11)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl02_03, _sh02_03)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl12_13, _sh12_13)); #endif #else // 0 1 2 3 4 5 6 7 __m128i _val0 = _mm_loadu_si128((const __m128i*)r0); __m128i _val1 = _mm_loadu_si128((const __m128i*)(r0 + 8)); __m128i _w0 = _mm_loadu_si128((const __m128i*)k0); __m128i _w1 = _mm_loadu_si128((const __m128i*)(k0 + 8)); __m128i _w2 = _mm_loadu_si128((const __m128i*)(k0 + 16)); __m128i _w3 = _mm_loadu_si128((const __m128i*)(k0 + 24)); #if __XOP__ __m128i _val0_01 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(0, 0, 0, 0)); __m128i _val0_23 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(1, 1, 1, 1)); __m128i _val0_45 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(2, 2, 2, 2)); __m128i _val0_67 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(3, 3, 3, 3)); __m128i _val1_01 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(0, 0, 0, 0)); __m128i _val1_23 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(1, 1, 1, 1)); __m128i _val1_45 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(2, 2, 2, 2)); __m128i _val1_67 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(3, 3, 3, 3)); _sum0 = _mm_maddd_epi16(_val0_01, _w0, _sum0); _sum1 = _mm_maddd_epi16(_val0_23, _w1, _sum1); _sum2 = _mm_maddd_epi16(_val1_01, _w0, _sum2); _sum3 = _mm_maddd_epi16(_val1_23, _w1, _sum3); _sum0 = _mm_maddd_epi16(_val0_45, _w2, _sum0); _sum1 = _mm_maddd_epi16(_val0_67, 
_w3, _sum1); _sum2 = _mm_maddd_epi16(_val1_45, _w2, _sum2); _sum3 = _mm_maddd_epi16(_val1_67, _w3, _sum3); #else // 0 0 1 1 2 2 3 3 // 4 4 5 5 6 6 7 7 __m128i _val0_0123 = _mm_unpacklo_epi16(_val0, _val0); __m128i _val0_4567 = _mm_unpackhi_epi16(_val0, _val0); __m128i _val1_0123 = _mm_unpacklo_epi16(_val1, _val1); __m128i _val1_4567 = _mm_unpackhi_epi16(_val1, _val1); __m128i _val0_01 = _mm_unpacklo_epi32(_val0_0123, _val0_0123); __m128i _val0_23 = _mm_unpackhi_epi32(_val0_0123, _val0_0123); __m128i _val0_45 = _mm_unpacklo_epi32(_val0_4567, _val0_4567); __m128i _val0_67 = _mm_unpackhi_epi32(_val0_4567, _val0_4567); __m128i _val1_01 = _mm_unpacklo_epi32(_val1_0123, _val1_0123); __m128i _val1_23 = _mm_unpackhi_epi32(_val1_0123, _val1_0123); __m128i _val1_45 = _mm_unpacklo_epi32(_val1_4567, _val1_4567); __m128i _val1_67 = _mm_unpackhi_epi32(_val1_4567, _val1_4567); __m128i _sl00 = _mm_mullo_epi16(_w0, _val0_01); __m128i _sh00 = _mm_mulhi_epi16(_w0, _val0_01); __m128i _sl10 = _mm_mullo_epi16(_w0, _val1_01); __m128i _sh10 = _mm_mulhi_epi16(_w0, _val1_01); __m128i _sl01 = _mm_mullo_epi16(_w1, _val0_23); __m128i _sh01 = _mm_mulhi_epi16(_w1, _val0_23); __m128i _sl11 = _mm_mullo_epi16(_w1, _val1_23); __m128i _sh11 = _mm_mulhi_epi16(_w1, _val1_23); __m128i _sl02 = _mm_mullo_epi16(_w2, _val0_45); __m128i _sh02 = _mm_mulhi_epi16(_w2, _val0_45); __m128i _sl12 = _mm_mullo_epi16(_w2, _val1_45); __m128i _sh12 = _mm_mulhi_epi16(_w2, _val1_45); __m128i _sl03 = _mm_mullo_epi16(_w3, _val0_67); __m128i _sh03 = _mm_mulhi_epi16(_w3, _val0_67); __m128i _sl13 = _mm_mullo_epi16(_w3, _val1_67); __m128i _sh13 = _mm_mulhi_epi16(_w3, _val1_67); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl00, _sh00)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl10, _sh10)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl10, _sh10)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl01, _sh01)); _sum1 = _mm_add_epi32(_sum1, 
_mm_unpackhi_epi16(_sl01, _sh01)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl11, _sh11)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl11, _sh11)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl02, _sh02)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl02, _sh02)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl12, _sh12)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl12, _sh12)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl03, _sh03)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl03, _sh03)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl13, _sh13)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl13, _sh13)); #endif #endif r0 += 16; k0 += 32; } #if __AVX2__ __m256i _sum0_2 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 2, 0, 0)); __m256i _sum1_3 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 3, 0, 1)); _sum0_2 = _mm256_add_epi32(_sum0_2, _sum1_3); int sum[8]; _mm256_storeu_si256((__m256i*)sum, _sum0_2); #else _sum0 = _mm_add_epi32(_sum0, _sum1); _sum2 = _mm_add_epi32(_sum2, _sum3); int sum[8]; _mm_storeu_si128((__m128i*)sum, _sum0); _mm_storeu_si128((__m128i*)(sum + 4), _sum2); #endif output0_tm[0] = sum[0]; output1_tm[0] = sum[1]; output2_tm[0] = sum[2]; output3_tm[0] = sum[3]; output0_tm[1] = sum[4]; output1_tm[1] = sum[5]; output2_tm[1] = sum[6]; output3_tm[1] = sum[7]; output0_tm += 2; output1_tm += 2; output2_tm += 2; output3_tm += 2; } for (; i < tiles; i++) { #if __AVX2__ const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2 + i % 2); #else const short* r0 = bb2.row<const short>(i / 2 + i % 2); #endif const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 #if __AVX2__ __m256i _sum0_1 = _mm256_setzero_si256(); #else __m128i _sum0 = _mm_setzero_si128(); __m128i _sum1 = _mm_setzero_si128(); #endif for (int j = 0; j < nn; j++) { // 0 1 2 3 4 5 6 7 __m128i _val = _mm_loadu_si128((const __m128i*)r0); #if __AVX2__ __m256i _w01 = 
_mm256_loadu_si256((const __m256i*)k0); __m256i _w23 = _mm256_loadu_si256((const __m256i*)(k0 + 16)); #if __AVXVNNI__ || __AVX512VNNI__ // 0 1 0 1 x x x x // 0 1 0 1 0 1 0 1 __m128i _val_01 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(0, 0, 0, 0)); __m128i _val_23 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(1, 1, 1, 1)); __m128i _val_45 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(2, 2, 2, 2)); __m128i _val_67 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(3, 3, 3, 3)); __m256i _val_0123 = _mm256_inserti128_si256(_mm256_castsi128_si256(_val_01), _val_23, 1); __m256i _val_4567 = _mm256_inserti128_si256(_mm256_castsi128_si256(_val_45), _val_67, 1); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w01, _val_0123); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w23, _val_4567); #else // 0 0 1 1 2 2 3 3 // 4 4 5 5 6 6 7 7 __m256i _val_0123 = _mm256_castsi128_si256(_mm_unpacklo_epi16(_val, _val)); __m256i _val_4567 = _mm256_castsi128_si256(_mm_unpackhi_epi16(_val, _val)); _val_0123 = _mm256_permutevar8x32_epi32(_val_0123, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); _val_4567 = _mm256_permutevar8x32_epi32(_val_4567, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _sl00_01 = _mm256_mullo_epi16(_w01, _val_0123); __m256i _sh00_01 = _mm256_mulhi_epi16(_w01, _val_0123); __m256i _sl02_03 = _mm256_mullo_epi16(_w23, _val_4567); __m256i _sh02_03 = _mm256_mulhi_epi16(_w23, _val_4567); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl00_01, _sh00_01)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl02_03, _sh02_03)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl00_01, _sh00_01)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl02_03, _sh02_03)); #endif #else __m128i _w0 = _mm_loadu_si128((const __m128i*)k0); __m128i _w1 = _mm_loadu_si128((const __m128i*)(k0 + 8)); __m128i _w2 = _mm_loadu_si128((const __m128i*)(k0 + 16)); __m128i _w3 = _mm_loadu_si128((const __m128i*)(k0 + 24)); #if __XOP__ __m128i _val01 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(0, 0, 0, 0)); __m128i _val23 
= _mm_shuffle_epi32(_val, _MM_SHUFFLE(1, 1, 1, 1)); __m128i _val45 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(2, 2, 2, 2)); __m128i _val67 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(3, 3, 3, 3)); _sum0 = _mm_maddd_epi16(_val01, _w0, _sum0); _sum1 = _mm_maddd_epi16(_val23, _w1, _sum1); _sum0 = _mm_maddd_epi16(_val45, _w2, _sum0); _sum1 = _mm_maddd_epi16(_val67, _w3, _sum1); #else // 0 0 1 1 2 2 3 3 // 4 4 5 5 6 6 7 7 __m128i _val_0123 = _mm_unpacklo_epi16(_val, _val); __m128i _val_4567 = _mm_unpackhi_epi16(_val, _val); __m128i _val01 = _mm_unpacklo_epi32(_val_0123, _val_0123); __m128i _val23 = _mm_unpackhi_epi32(_val_0123, _val_0123); __m128i _val45 = _mm_unpacklo_epi32(_val_4567, _val_4567); __m128i _val67 = _mm_unpackhi_epi32(_val_4567, _val_4567); __m128i _sl0 = _mm_mullo_epi16(_w0, _val01); __m128i _sh0 = _mm_mulhi_epi16(_w0, _val01); __m128i _sl1 = _mm_mullo_epi16(_w1, _val23); __m128i _sh1 = _mm_mulhi_epi16(_w1, _val23); __m128i _sl2 = _mm_mullo_epi16(_w2, _val45); __m128i _sh2 = _mm_mulhi_epi16(_w2, _val45); __m128i _sl3 = _mm_mullo_epi16(_w3, _val67); __m128i _sh3 = _mm_mulhi_epi16(_w3, _val67); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl1, _sh1)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl1, _sh1)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl2, _sh2)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl2, _sh2)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl3, _sh3)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl3, _sh3)); #endif #endif r0 += 8; k0 += 32; } #if __AVX2__ __m128i _sum0 = _mm256_extracti128_si256(_sum0_1, 0); __m128i _sum1 = _mm256_extracti128_si256(_sum0_1, 1); #endif _sum0 = _mm_add_epi32(_sum0, _sum1); int sum[4]; _mm_storeu_si128((__m128i*)sum, _sum0); output0_tm[0] = sum[0]; output1_tm[0] = sum[1]; output2_tm[0] = sum[2]; output3_tm[0] = sum[3]; output0_tm += 1; output1_tm += 1; output2_tm 
+= 1; output3_tm += 1; } } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { int* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p / 4 + p % 4); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __AVX2__ for (; i + 3 < tiles; i += 4) { const short* r0 = bb2.row<const short>(i / 4); const short* k0 = kernel0_tm.row<const short>(r); __m128i _sum0 = _mm_setzero_si128(); __m128i _sum1 = _mm_setzero_si128(); __m128i _sum2 = _mm_setzero_si128(); __m128i _sum3 = _mm_setzero_si128(); __m128i _sum4 = _mm_setzero_si128(); __m128i _sum5 = _mm_setzero_si128(); __m128i _sum6 = _mm_setzero_si128(); __m128i _sum7 = _mm_setzero_si128(); for (int q = 0; q < inch; q++) { __m128i _val0 = _mm_loadu_si128((const __m128i*)r0); __m128i _val1 = _mm_loadu_si128((const __m128i*)(r0 + 8)); __m128i _val2 = _mm_loadu_si128((const __m128i*)(r0 + 16)); __m128i _val3 = _mm_loadu_si128((const __m128i*)(r0 + 24)); __m128i _w0 = _mm_loadu_si128((const __m128i*)k0); __m128i _sl0 = _mm_mullo_epi16(_val0, _w0); __m128i _sh0 = _mm_mulhi_epi16(_val0, _w0); __m128i _sl1 = _mm_mullo_epi16(_val1, _w0); __m128i _sh1 = _mm_mulhi_epi16(_val1, _w0); __m128i _sl2 = _mm_mullo_epi16(_val2, _w0); __m128i _sh2 = _mm_mulhi_epi16(_val2, _w0); __m128i _sl3 = _mm_mullo_epi16(_val3, _w0); __m128i _sh3 = _mm_mulhi_epi16(_val3, _w0); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl1, _sh1)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl1, _sh1)); _sum4 = _mm_add_epi32(_sum4, _mm_unpacklo_epi16(_sl2, _sh2)); _sum5 = _mm_add_epi32(_sum5, _mm_unpackhi_epi16(_sl2, _sh2)); _sum6 = _mm_add_epi32(_sum6, _mm_unpacklo_epi16(_sl3, _sh3)); _sum7 = _mm_add_epi32(_sum7, _mm_unpackhi_epi16(_sl3, _sh3)); k0 += 8; r0 += 32; } _sum0 = 
_mm_add_epi32(_sum0, _sum1); _sum2 = _mm_add_epi32(_sum2, _sum3); _sum4 = _mm_add_epi32(_sum4, _sum5); _sum6 = _mm_add_epi32(_sum6, _sum7); output0_tm[0] = _mm_reduce_add_epi32(_sum0); output0_tm[1] = _mm_reduce_add_epi32(_sum2); output0_tm[2] = _mm_reduce_add_epi32(_sum4); output0_tm[3] = _mm_reduce_add_epi32(_sum6); output0_tm += 4; } #endif for (; i + 1 < tiles; i += 2) { #if __AVX2__ const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2); #else const short* r0 = bb2.row<const short>(i / 2); #endif const short* k0 = kernel0_tm.row<const short>(r); __m128i _sum0 = _mm_setzero_si128(); __m128i _sum1 = _mm_setzero_si128(); __m128i _sum2 = _mm_setzero_si128(); __m128i _sum3 = _mm_setzero_si128(); for (int q = 0; q < inch; q++) { __m128i _val0 = _mm_loadu_si128((const __m128i*)r0); __m128i _val1 = _mm_loadu_si128((const __m128i*)(r0 + 8)); __m128i _w0 = _mm_loadu_si128((const __m128i*)k0); __m128i _sl0 = _mm_mullo_epi16(_val0, _w0); __m128i _sh0 = _mm_mulhi_epi16(_val0, _w0); __m128i _sl1 = _mm_mullo_epi16(_val1, _w0); __m128i _sh1 = _mm_mulhi_epi16(_val1, _w0); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl1, _sh1)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl1, _sh1)); k0 += 8; r0 += 16; } _sum0 = _mm_add_epi32(_sum0, _sum1); _sum2 = _mm_add_epi32(_sum2, _sum3); output0_tm[0] = _mm_reduce_add_epi32(_sum0); output0_tm[1] = _mm_reduce_add_epi32(_sum2); output0_tm += 2; } for (; i < tiles; i++) { #if __AVX2__ const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2 + i % 2); #else const short* r0 = bb2.row<const short>(i / 2 + i % 2); #endif const short* k0 = kernel0_tm.row<const short>(r); __m128i _sum0 = _mm_setzero_si128(); __m128i _sum1 = _mm_setzero_si128(); for (int q = 0; q < inch; q++) { __m128i _val = _mm_loadu_si128((const __m128i*)r0); __m128i _w0 = _mm_loadu_si128((const __m128i*)k0); __m128i _sl0 = 
_mm_mullo_epi16(_val, _w0); __m128i _sh0 = _mm_mulhi_epi16(_val, _w0); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0)); k0 += 8; r0 += 8; } _sum0 = _mm_add_epi32(_sum0, _sum1); output0_tm[0] = _mm_reduce_add_epi32(_sum0); output0_tm++; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 4u, 1, opt.workspace_allocator); } { // const float otm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); int tmp[4][6]; // tile for (int i = 0; i < outh / 4; i++) { for (int j = 0; j < outw / 4; j++) { // top_blob_tm.create(tiles, 36, outch, 4u, 1, opt.workspace_allocator); const int* output0_tm_0 = (const int*)out0_tm + (i * w_tm / 6 + j) * 1; const int* output0_tm_1 = output0_tm_0 + tiles * 1; const int* output0_tm_2 = output0_tm_0 + tiles * 2; const int* output0_tm_3 = output0_tm_0 + tiles * 3; const int* output0_tm_4 = output0_tm_0 + tiles * 4; const int* output0_tm_5 = output0_tm_0 + tiles * 5; int* output0 = out0.row<int>(i * 4) + j * 4; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 // TODO sse optimize for (int m = 0; m < 5; m++) { int tmp02a = output0_tm_1[0] + output0_tm_2[0]; int 
tmp13a = output0_tm_1[0] - output0_tm_2[0]; int tmp02b = output0_tm_3[0] + output0_tm_4[0]; int tmp13b = output0_tm_3[0] - output0_tm_4[0]; tmp[0][m] = output0_tm_0[0] + tmp02a + tmp02b; tmp[1][m] = tmp13a + tmp13b * 2; tmp[2][m] = tmp02a + tmp02b * 4; tmp[3][m] = output0_tm_5[0] * 4 + tmp13a + tmp13b * 8; output0_tm_0 += tiles * 6; output0_tm_1 += tiles * 6; output0_tm_2 += tiles * 6; output0_tm_3 += tiles * 6; output0_tm_4 += tiles * 6; output0_tm_5 += tiles * 6; } for (int m = 5; m < 6; m++) { int tmp02a = output0_tm_1[0] + output0_tm_2[0]; int tmp13a = output0_tm_1[0] - output0_tm_2[0]; int tmp02b = output0_tm_3[0] + output0_tm_4[0]; int tmp13b = output0_tm_3[0] - output0_tm_4[0]; tmp[0][m] = (output0_tm_0[0] + tmp02a + tmp02b) * 4; tmp[1][m] = (tmp13a + tmp13b * 2) * 4; tmp[2][m] = (tmp02a + tmp02b * 4) * 4; tmp[3][m] = (output0_tm_5[0] * 4 + tmp13a + tmp13b * 8) * 4; output0_tm_0 += tiles * 6; output0_tm_1 += tiles * 6; output0_tm_2 += tiles * 6; output0_tm_3 += tiles * 6; output0_tm_4 += tiles * 6; output0_tm_5 += tiles * 6; } for (int m = 0; m < 4; m++) { const int* tmp0 = tmp[m]; int tmp02a = tmp0[1] + tmp0[2]; int tmp13a = tmp0[1] - tmp0[2]; int tmp02b = tmp0[3] + tmp0[4]; int tmp13b = tmp0[3] - tmp0[4]; output0[0] = (tmp0[0] + tmp02a + tmp02b) / 576; output0[1] = (tmp13a + tmp13b * 2) / 576; output0[2] = (tmp02a + tmp02b * 4) / 576; output0[3] = (tmp0[5] + tmp13a + tmp13b * 8) / 576; output0 += outw; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
tclo.pluto.c
#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#include <stdio.h>
#include <stdlib.h>

void printMatrix(int **, int);
int **allocateMatrix(int);
static void freeMatrix(int **, int);

/* Compute the transitive closure of the adjacency matrix `matrix` (N x N,
 * entries 0/1) and print it.  The loop nest is a Pluto-generated, 32x32
 * tiled Floyd-Warshall: t1 is the classic "k" pivot, and the (i,j) plane
 * is tiled and parallelized across tiles with OpenMP.
 * `matrix` is not modified; the working copy is freed before returning
 * (the original leaked it). */
void computeTC(int **matrix, int N)
{
    int **reach = allocateMatrix(N);
    int i, j;

    /* Work on a copy so the caller's matrix is left intact. */
    for (i = 0; i < N; i++)
        for (j = 0; j < N; j++)
            reach[i][j] = matrix[i][j];

    int t1, t2, t3, t4, t5;
    int lbp, ubp;

    if (N >= 1) {
        for (t1 = 0; t1 <= N-1; t1++) {                       /* pivot k */
            for (t2 = 0; t2 <= floord(N-1,16); t2++) {        /* tile wavefront */
                lbp = max(0, ceild(32*t2-N+1, 32));
                ubp = min(floord(N-1,32), t2);
                /* Tiles along one wavefront are independent: parallelize. */
#pragma omp parallel for private(t4,t5)
                for (t3 = lbp; t3 <= ubp; t3++) {
                    for (t4 = 32*t2-32*t3; t4 <= min(N-1, 32*t2-32*t3+31); t4++) {
                        for (t5 = 32*t3; t5 <= min(N-1, 32*t3+31); t5++) {
                            reach[t4][t5] = reach[t4][t5] || (reach[t4][t1] && reach[t1][t5]);
                        }
                    }
                }
            }
        }
    }

    printMatrix(reach, N);
    freeMatrix(reach, N);   /* fix: the working copy was leaked */
}

/* Print an N x N matrix, one row per line. */
void printMatrix(int **matrix, int N)
{
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++)
            printf("%d ", matrix[i][j]);
        printf("\n");
    }
}

/* Allocate an N x N int matrix as an array of row pointers.
 * Aborts on allocation failure (fix: results were unchecked). */
int **allocateMatrix(int N)
{
    int **t = (int **) malloc(sizeof(int *) * N);
    if (t == NULL) {
        fprintf(stderr, "allocateMatrix: out of memory\n");
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < N; i++) {
        t[i] = (int *) malloc(sizeof(int) * N);
        if (t[i] == NULL) {
            fprintf(stderr, "allocateMatrix: out of memory\n");
            exit(EXIT_FAILURE);
        }
    }
    return t;
}

/* Release a matrix created by allocateMatrix(). */
static void freeMatrix(int **m, int N)
{
    for (int i = 0; i < N; i++)
        free(m[i]);
    free(m);
}

int main(void)
{
    const int N = 4;
    int **graph = allocateMatrix(N);
    int g[4][4] = {
        {1, 1, 0, 1},
        {0, 1, 1, 0},
        {0, 0, 1, 1},
        {0, 0, 0, 1}
    };
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            graph[i][j] = g[i][j];

    printMatrix(graph, 4);
    computeTC(graph, 4);

    freeMatrix(graph, N);   /* fix: graph was leaked */
    return 0;
}
matmult.c
/****************************************************************************** * OpenMp Example - Matrix Multiply - C Version * Demonstrates a matrix multiply using OpenMP. * * Modified from here: * https://computing.llnl.gov/tutorials/openMP/samples/C/omp_mm.c * * For PAPI_FP_INS, the exclusive count for the event: * for (null) [OpenMP location: file:matmult.c ] * should be 2E+06 / Number of Threads ******************************************************************************/ #include <stdio.h> #include <stdlib.h> #include "matmult_initialize.h" #ifdef TAU_MPI int provided; #include <mpi.h> /* NOTE: MPI is just used to spawn multiple copies of the kernel to different ranks. This is not a parallel implementation */ #endif /* TAU_MPI */ #ifdef PTHREADS #include <pthread.h> #include <unistd.h> #include <errno.h> /*** NOTE THE ATTR INITIALIZER HERE! ***/ pthread_mutex_t mutexsum; #endif /* PTHREADS */ #ifndef MATRIX_SIZE #define MATRIX_SIZE 512 #endif #define ITERATIONS 3 #define NRA MATRIX_SIZE /* number of rows in matrix A */ #define NCA MATRIX_SIZE /* number of columns in matrix A */ #define NCB MATRIX_SIZE /* number of columns in matrix B */ double** allocateMatrix(int rows, int cols) { int i; double **matrix = (double**)malloc((sizeof(double*)) * rows); for (i=0; i<rows; i++) { matrix[i] = (double*)malloc((sizeof(double)) * cols); } return matrix; } void freeMatrix(double** matrix, int rows, int cols) { int i; for (i=0; i<rows; i++) { free(matrix[i]); } free(matrix); } #ifdef APP_USE_INLINE_MULTIPLY __inline double multiply(double a, double b) { return a * b; } #endif /* APP_USE_INLINE_MULTIPLY */ #if 0 // cols_a and rows_b are the same value void compute_nested(double **a, double **b, double **c, int rows_a, int cols_a, int cols_b) { int i,j,k; double tmp = 0.0; //num_threads(2) #pragma omp parallel private(i) shared(a,b,c) { /*** Do matrix multiply sharing iterations on outer loop ***/ /*** Display who does which iterations for demonstration purposes ***/ 
#pragma omp for nowait schedule(dynamic,1) for (i=0; i<rows_a; i++) { //num_threads(2) #pragma omp parallel private(i,j,k) shared(a,b,c) { #pragma omp for nowait schedule(dynamic,1) for (k=0; k<cols_a; k++) { for(j=0; j<cols_b; j++) { #ifdef APP_USE_INLINE_MULTIPLY c[i][j] += multiply(a[i][k], b[k][j]); #else tmp = a[i][k]; tmp = tmp * b[k][j]; c[i][j] += tmp; #endif } } } } } /*** End of parallel region ***/ } #endif // cols_a and rows_b are the same value void compute(double **a, double **b, double **c, int rows_a, int cols_a, int cols_b) { int i,j,k; #pragma omp parallel private(i,j,k) shared(a,b,c) { /*** Do matrix multiply sharing iterations on outer loop ***/ /*** Display who does which iterations for demonstration purposes ***/ #pragma omp for schedule(dynamic) nowait for (i=0; i<rows_a; i++) { for(j=0; j<cols_b; j++) { for (k=0; k<cols_a; k++) { #ifdef APP_USE_INLINE_MULTIPLY c[i][j] += multiply(a[i][k], b[k][j]); #else /* APP_USE_INLINE_MULTIPLY */ c[i][j] += a[i][k] * b[k][j]; #endif /* APP_USE_INLINE_MULTIPLY */ } } } } /*** End of parallel region ***/ } void compute_interchange(double **a, double **b, double **c, int rows_a, int cols_a, int cols_b) { int i,j,k; #pragma omp parallel private(i,j,k) shared(a,b,c) { /*** Do matrix multiply sharing iterations on outer loop ***/ /*** Display who does which iterations for demonstration purposes ***/ #pragma omp for schedule(dynamic) nowait for (i=0; i<rows_a; i++) { for (k=0; k<cols_a; k++) { for(j=0; j<cols_b; j++) { #ifdef APP_USE_INLINE_MULTIPLY c[i][j] += multiply(a[i][k], b[k][j]); #else /* APP_USE_INLINE_MULTIPLY */ c[i][j] += a[i][k] * b[k][j]; #endif /* APP_USE_INLINE_MULTIPLY */ } } } } /*** End of parallel region ***/ } double do_work(void) { double **a, /* matrix A to be multiplied */ **b, /* matrix B to be multiplied */ **c; /* result matrix C */ a = allocateMatrix(NRA, NCA); b = allocateMatrix(NCA, NCB); c = allocateMatrix(NRA, NCB); /*** Spawn a parallel region explicitly scoping all variables 
***/ initialize(a, NRA, NCA); initialize(b, NCA, NCB); initialize(c, NRA, NCB); compute(a, b, c, NRA, NCA, NCB); #if defined(TAU_OPENMP) #if 0 //if (omp_get_nested()) { compute_nested(a, b, c, NRA, NCA, NCB); //} #endif #endif compute_interchange(a, b, c, NRA, NCA, NCB); double result = c[0][1]; freeMatrix(a, NRA, NCA); freeMatrix(b, NCA, NCB); freeMatrix(c, NCA, NCB); return result; } #ifdef PTHREADS int busy_sleep() { int i, sum = 0; for (i = 0 ; i < 100000000 ; i++) { sum = sum+i; } return sum; } void * threaded_func(void *data) { int rc; int sum = 0; // compute int i; for (i = 0 ; i < ITERATIONS ; i++) { do_work(); } #ifdef APP_DO_LOCK_TEST // test locking - sampling should catch this if ((rc = pthread_mutex_lock(&mutexsum)) != 0) { errno = rc; perror("thread lock error"); exit(1); } fprintf(stderr,"Thread 'sleeping'...\n"); fflush(stderr); sum += busy_sleep(); fprintf(stderr,"Thread 'awake'...\n"); fflush(stderr); if ((rc = pthread_mutex_unlock(&mutexsum)) != 0) { errno = rc; perror("thread unlock error"); exit(1); } pthread_exit((void*) 0); #endif // APP_DO_LOCK_TEST return NULL; } #endif // PTHREADS int main (int argc, char *argv[]) { #ifdef PTHREADS int ret; pthread_attr_t attr; pthread_t tid1, tid2, tid3; pthread_mutexattr_t Attr; pthread_mutexattr_init(&Attr); #ifndef TAU_CRAYCNL pthread_mutexattr_settype(&Attr, PTHREAD_MUTEX_ERRORCHECK); #endif /* TAU_CRAYCNL */ if (pthread_mutex_init(&mutexsum, &Attr)) { printf("Error while using pthread_mutex_init\n"); } #endif /* PTHREADS */ #ifdef TAU_MPI int rc = MPI_SUCCESS; int rank = 0; int comm_size = 0; #if defined(PTHREADS) rc = MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided); MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (rank == 0) { printf("MPI_Init_thread: provided = %d, MPI_THREAD_MULTIPLE=%d\n", provided, MPI_THREAD_MULTIPLE); } #elif defined(TAU_OPENMP) rc = MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided); MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (rank == 0) { 
printf("MPI_Init_thread: provided = %d, MPI_THREAD_FUNNELED=%d\n", provided, MPI_THREAD_FUNNELED); } #else rc = MPI_Init(&argc, &argv); #endif /* THREADS */ if (rc != MPI_SUCCESS) { char *errorstring; int length = 0; MPI_Error_string(rc, errorstring, &length); printf("Error: MPI_Init failed, rc = %d\n%s\n", rc, errorstring); exit(1); } MPI_Comm_size(MPI_COMM_WORLD, &comm_size); #endif /* TAU_MPI */ #ifdef PTHREADS ret = pthread_create(&tid1, NULL, threaded_func, NULL); if (ret) { printf("Error: pthread_create (1) fails ret = %d\n", ret); exit(1); } ret = pthread_create(&tid2, NULL, threaded_func, NULL); if (ret) { printf("Error: pthread_create (2) fails ret = %d\n", ret); exit(1); } ret = pthread_create(&tid3, NULL, threaded_func, NULL); if (ret) { printf("Error: pthread_create (3) fails ret = %d\n", ret); exit(1); } #endif /* PTHREADS */ #ifdef TAU_MPI // create a communicator /* The code above only works with 4 or more processes!! */ if (comm_size >=4 ) { MPI_Group group_world, odd_group, even_group, diff_group, union_group, inter_group, re_group, ri_group; int j, Neven, Nodd, members[8], ierr; MPI_Comm_group(MPI_COMM_WORLD, &group_world); MPI_Comm world_comm; MPI_Comm_create(MPI_COMM_WORLD, group_world, &world_comm); Neven = (comm_size+1)/2; /* processes of MPI_COMM_WORLD are divided */ Nodd = comm_size - Neven; /* into odd- and even-numbered groups */ for (j=0; j < Neven; j++) { /* "members" determines members of even_group */ members[j] = 2*j; }; MPI_Group_incl(group_world, Neven, members, &even_group); MPI_Group_excl(group_world, Neven, members, &odd_group); MPI_Comm even_comm; MPI_Comm odd_comm; MPI_Comm_create(MPI_COMM_WORLD, even_group, &even_comm); MPI_Comm_create(MPI_COMM_WORLD, odd_group, &odd_comm); MPI_Group_difference(group_world, even_group, &diff_group); MPI_Group_intersection(group_world, odd_group, &inter_group); MPI_Group_union(group_world, odd_group, &union_group); int range[2][3] = {{0,1,1},{2,3,1}}; MPI_Group_range_excl(group_world, 2, range, 
&re_group); MPI_Group_range_incl(group_world, 2, range, &ri_group); int ranks[2] = {0,1}; int ranks_out[2] = {0}; MPI_Group_translate_ranks(group_world, 2, ranks, union_group, ranks_out); } #endif /* TAU_MPI */ /* On thread 0: */ int i; for (i = 0 ; i < ITERATIONS ; i++) { printf("%d.", i);fflush(stdout); do_work(); } #ifdef PTHREADS ret = pthread_join(tid1, NULL); if (ret) { printf("Error: pthread_join (1) fails ret = %d\n", ret); exit(1); } ret = pthread_join(tid2, NULL); if (ret) { printf("Error: pthread_join (2) fails ret = %d\n", ret); exit(1); } ret = pthread_join(tid3, NULL); if (ret) { printf("Error: pthread_join (3) fails ret = %d\n", ret); exit(1); } pthread_mutex_destroy(&mutexsum); #endif /* PTHREADS */ #ifdef TAU_MPI MPI_Finalize(); #endif /* TAU_MPI */ printf ("Done.\n"); return 0; }
GB_binop__rminus_int32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): every function body below is a shared template brought in via
// #include; the macros defined here specialize those templates for the
// RMINUS operator on int32 (cij = bij - aij).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):       GB (_AaddB__rminus_int32)
// A.*B function (eWiseMult):     GB (_AemultB_08__rminus_int32)
// A.*B function (eWiseMult):     GB (_AemultB_02__rminus_int32)
// A.*B function (eWiseMult):     GB (_AemultB_04__rminus_int32)
// A.*B function (eWiseMult):     GB (_AemultB_bitmap__rminus_int32)
// A*D function (colscale):       GB (_AxD__rminus_int32)
// D*A function (rowscale):       GB (_DxB__rminus_int32)
// C+=B function (dense accum):   GB (_Cdense_accumB__rminus_int32)
// C+=b function (dense accum):   GB (_Cdense_accumb__rminus_int32)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__rminus_int32)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__rminus_int32)
// C=scalar+B     GB (_bind1st__rminus_int32)
// C=scalar+B'    GB (_bind1st_tran__rminus_int32)
// C=A+scalar     GB (_bind2nd__rminus_int32)
// C=A'+scalar    GB (_bind2nd_tran__rminus_int32)

// C type:   int32_t
// A type:   int32_t
// A pattern? 0
// B type:   int32_t
// B pattern? 0

// BinaryOp: cij = (bij - aij)

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: note the reversed operands (rminus: z = y - x)
#define GB_BINOP(z,x,y,i,j) \
    z = (y - x) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_RMINUS || GxB_NO_INT32 || GxB_NO_RMINUS_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__rminus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__rminus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__rminus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__rminus_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__rminus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__rminus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__rminus_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read for eWiseUnion (values used where A or B
    // has no entry)
    int32_t alpha_scalar ;
    int32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__rminus_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__rminus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__rminus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__rminus_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__rminus_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t   x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap (Bb == NULL means all
        // present)
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (bij - x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__rminus_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t   y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (y - aij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij - x) ;                       \
}

GrB_Info GB (_bind1st_tran__rminus_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (y - aij) ;                       \
}

GrB_Info GB (_bind2nd_tran__rminus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ast-dump-openmp-target-data.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test(int x) { #pragma omp target data map(x) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-data.c:3:1, line:6:1> line:3:6 test 'void (int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:11, col:15> col:15 used x 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:18, line:6:1> // CHECK-NEXT: `-OMPTargetDataDirective {{.*}} <line:4:9, col:31> // CHECK-NEXT: |-OMPMapClause {{.*}} <col:25, col:30> // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:29> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3> // CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: |-NullStmt {{.*}} <col:3> openmp_structured_block // CHECK-NEXT: `-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-data.c:4:9) *const restrict'
ompGetNumThreads.c
/* Contributed by pranav@ics.forth.gr 4/14/2010 */ #include <stdio.h> #include <omp.h> // The non-existence of omp.h is essential to repeat the original bug //#include <omp.h> int main() { int k; #pragma omp parallel { #pragma omp master { k = omp_get_num_threads(); printf ("Number of Threads requested = %i\n",k); } } return 0; }
LAGraph_cc_fastsv2.c
/*
    LAGraph:  graph algorithms based on GraphBLAS

    Copyright 2019 LAGraph Contributors.

    (see Contributors.txt for a full list of Contributors; see
    ContributionInstructions.txt for information on how you can
    Contribute to this project).

    All Rights Reserved.

    NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
    CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
    AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
    PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
    THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
    RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.

    Released under a BSD license, please see the LICENSE file distributed with
    this Software or contact permission@sei.cmu.edu for full terms.

    Created, in part, with funding and support from the United States
    Government.  (see Acknowledgments.txt file).

    This program includes and/or can make use of certain third party source
    code, object code, documentation and other files ("Third Party Software").
    See LICENSE file for more details.

*/

/**
 * Code is based on the algorithm described in the following paper
 * Zhang, Azad, Hu. FastSV: A Distributed-Memory Connected Component
 * Algorithm with Fast Convergence (SIAM PP20)
 *
 * Modified by Tim Davis, Texas A&M University
 **/

// The input matrix A must be symmetric.  Self-edges (diagonal entries) are
// OK, and are ignored.  The values and type of A are ignored; just its
// pattern is accessed.
#include "LAGraph.h"

//------------------------------------------------------------------------------
// atomic_min_uint64: (*p) = min ((*p), value), atomically
//------------------------------------------------------------------------------

// Classic compare-and-swap loop: re-read (*p), compute the candidate minimum,
// and retry until no other thread has changed (*p) in between.

static inline void atomic_min_uint64
(
    uint64_t *p,            // input/output: location to be minimized
    uint64_t value          // input: candidate minimum
)
{
    uint64_t old, new ;
    do
    {
        // get the old value at (*p)
        #pragma omp atomic read
        old = (*p) ;
        // compute the new minimum
        new = LAGRAPH_MIN (old, value) ;
    }
    while (!__sync_bool_compare_and_swap (p, old, new)) ;
}

// no workspace to free inside Reduce_assign
#define LAGRAPH_FREE_ALL

//------------------------------------------------------------------------------
// Reduce_assign: w (index) += src
//------------------------------------------------------------------------------

// mask = NULL, accumulator = GrB_MIN_UINT64, descriptor = NULL
// Duplicates are summed with the accumulator, which differs from how
// GrB_assign works.

// Implementation: both vectors are dumped to the caller-provided workspace
// (mem, of size 2*n: sval for src, wval for w), the min-reduction by index is
// done on the raw arrays, and w is rebuilt from wval.  I must contain
// [0..n-1] so the rebuild has no duplicates (the GrB_PLUS_UINT64 dup operator
// is then never applied).

static GrB_Info Reduce_assign
(
    GrB_Vector w,           // vector of size n, all entries present
    GrB_Vector src,         // vector of size n, all entries present
    GrB_Index *index,       // array of size n
    GrB_Index n,
    GrB_Index *I,           // size n, containing [0, 1, 2, ..., n-1]
    GrB_Index *mem,         // workspace of size 2*n, provided by the caller
    int nthreads
)
{
    GrB_Index nw, ns;
    LAGr_Vector_nvals(&nw, w);
    LAGr_Vector_nvals(&ns, src);
    // split the workspace: first nw entries for src values, rest for w values
    GrB_Index *sval = mem, *wval = sval + nw;
    LAGr_Vector_extractTuples(NULL, wval, &nw, w);
    LAGr_Vector_extractTuples(NULL, sval, &ns, src);
#if 0
    if (nthreads >= 4)
    {
        // parallel variant (currently disabled): fold each sval [i]
        // atomically into wval [index [i]]
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (GrB_Index i = 0; i < n; i++)
        {
            atomic_min_uint64 (&(wval [index [i]]), sval [i]) ;
            // if (sval[i] < wval[index[i]])
            //     wval[index[i]] = sval[i];
        }
    }
    else
#endif
    {
        // sequential: wval [index [i]] = min (wval [index [i]], sval [i])
        for (GrB_Index i = 0; i < n; i++)
        {
            if (sval[i] < wval[index[i]])
                wval[index[i]] = sval[i];
        }
    }
    // rebuild w in place from the reduced values
    LAGr_Vector_clear(w);
    LAGr_Vector_build(w, I, wval, nw, GrB_PLUS_UINT64);
    return GrB_SUCCESS;
}

#undef  LAGRAPH_FREE_ALL
#define LAGRAPH_FREE_ALL    \
{                           \
    LAGRAPH_FREE (I);       \
    LAGRAPH_FREE (V);       \
    LAGRAPH_FREE (mem);     \
    LAGr_free (&f) ;        \
    LAGr_free (&gp);        \
    LAGr_free (&mngp);      \
    LAGr_free (&gp_new);    \
    LAGr_free (&mod);       \
    if (sanitize) LAGr_free (&S); \
}

//------------------------------------------------------------------------------
// LAGraph_cc_fastsv2
//------------------------------------------------------------------------------

// Connected components of an undirected graph via the FastSV algorithm.
// On success, *result is a vector of size n where result[i] is the id of the
// component representative ("grandparent") of node i.  The returned vector f
// becomes owned by the caller; all other workspace is freed here.

GrB_Info LAGraph_cc_fastsv2
(
    GrB_Vector *result,     // output: array of component identifiers
    GrB_Matrix A,           // input matrix
    bool sanitize           // if true, ensure A is symmetric
)
{
    GrB_Info info;          // required by the LAGr_* / LAGRAPH_OK macros
    GrB_Index n, *mem = NULL, *I = NULL, *V = NULL ;
    GrB_Vector f = NULL, gp_new = NULL, mngp = NULL, mod = NULL, gp = NULL ;
    GrB_Matrix S = NULL ;

    LAGr_Matrix_nrows (&n, A) ;
    if (sanitize)
    {
        // S = A | A'
        LAGr_Matrix_new (&S, GrB_BOOL, n, n) ;
        LAGr_eWiseAdd (S, NULL, NULL, GrB_LOR, A, A, LAGraph_desc_otoo) ;
    }
    else
    {
        // Use the input as-is, and assume it is symmetric
        S = A ;
    }

    // determine # of threads to use for Reduce_assign
    // (roughly one thread per 1M nodes, at least 1, at most nthreads_max)
    int nthreads_max = LAGraph_get_nthreads ( ) ;
    int nthreads = n / (1024*1024) ;
    nthreads = LAGRAPH_MIN (nthreads, nthreads_max) ;
    nthreads = LAGRAPH_MAX (nthreads, 1) ;

    // vectors
    LAGr_Vector_new(&f, GrB_UINT64, n);
    LAGr_Vector_new(&gp_new, GrB_UINT64, n);
    LAGr_Vector_new(&mod, GrB_BOOL, n);

    // temporary arrays
    // NOTE(review): these allocations are not checked for NULL before the
    // I[i] = V[i] = i loop below — confirm LAGraph_malloc failure handling.
    I = LAGraph_malloc (n, sizeof(GrB_Index));
    V = LAGraph_malloc (n, sizeof(uint64_t)) ;
    mem = (GrB_Index*) LAGraph_malloc (2*n, sizeof(GrB_Index)) ;

    // prepare vectors: f = gp = mngp = [0..n-1] (each node is its own parent)
    for (GrB_Index i = 0; i < n; i++)
        I[i] = V[i] = i;
    LAGr_Vector_build (f, I, V, n, GrB_PLUS_UINT64);
    LAGr_Vector_dup (&gp, f);
    LAGr_Vector_dup (&mngp,f);

    // main computation: iterate until no parent changes
    bool diff = true ;
    while (diff)
    {
        // hooking & shortcutting
        // mngp = min (mngp, S*gp) with the (min, 2nd) semiring
        LAGr_mxv (mngp, 0, GrB_MIN_UINT64, GxB_MIN_SECOND_UINT64, S, gp, 0);
        LAGRAPH_OK (Reduce_assign (f, mngp, V, n, I, mem, nthreads));
        LAGr_eWiseMult (f, 0, 0, GrB_MIN_UINT64, f, mngp, 0);
        LAGr_eWiseMult (f, 0, 0, GrB_MIN_UINT64, f, gp, 0);

        // calculate grandparent: gp_new = f (f), using V = current parents
        LAGr_Vector_extractTuples (NULL, V, &n, f);
        LAGr_extract (gp_new, 0, 0, f, V, n, 0);

        // check termination: diff = any (gp_new != gp)
        LAGr_eWiseMult (mod, 0, 0, GrB_NE_UINT64, gp_new, gp, 0);
        LAGr_reduce (&diff, 0, GxB_LOR_BOOL_MONOID, mod, 0);

        // swap gp and gp_new
        GrB_Vector t = gp ; gp = gp_new ; gp_new = t ;
    }

    // free workspace and return result
    // (f is handed to the caller; clearing it first keeps LAGRAPH_FREE_ALL
    // from freeing the returned vector)
    *result = f;
    f = NULL ;
    LAGRAPH_FREE_ALL ;
    return GrB_SUCCESS;
}
pr49640.c
/* PR middle-end/49640 */
/* { dg-do compile } */
/* { dg-options "-O2 -std=gnu99 -fopenmp" } */

/* Compile-only regression test: #pragma omp parallel for over
   variable-length-array-typed parameters (x[P][Q][R], y[N][M][K]).
   The dg- directive comments above are read by DejaGnu — keep them exact.  */

/* Copy a P x Q x R block of x into y at offset (i, j, k).  */
void
foo (int N, int M, int K, int P, int Q, int R, int i, int j, int k,
     unsigned char x[P][Q][R], int y[N][M][K])
{
  int ii, jj, kk;

#pragma omp parallel for private(ii,jj,kk)
  for (ii = 0; ii < P; ++ii)
    for (jj = 0; jj < Q; ++jj)
      for (kk = 0; kk < R; ++kk)
        y[i + ii][j + jj][k + kk] = x[ii][jj][kk];
}

/* Same copy, with a scale (factor) and offset (zero) applied per element.  */
void
bar (int N, int M, int K, int P, int Q, int R, int i, int j, int k,
     unsigned char x[P][Q][R], float y[N][M][K], float factor, float zero)
{
  int ii, jj, kk;

#pragma omp parallel for private(ii,jj,kk)
  for (ii = 0; ii < P; ++ii)
    for (jj = 0; jj < Q; ++jj)
      for (kk = 0; kk < R; ++kk)
        y[i + ii][j + jj][k + kk] = factor * x[ii][jj][kk] + zero;
}
par_add_cycle.c
/*BHEADER**********************************************************************
 * Copyright (c) 2008,  Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE.  See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision$
 ***********************************************************************EHEADER*/

/******************************************************************************
 *
 * ParAMG cycling routine
 *
 *****************************************************************************/

#include "_hypre_parcsr_ls.h"
#include "par_amg.h"

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCycle
 *
 * One V-cycle of the additive variants of BoomerAMG.  Levels outside the
 * range [addlvl, add_end] are handled multiplicatively (smooth, form
 * residual, restrict; then interpolate and smooth on the way up); levels
 * inside the range are handled additively through the combined vectors
 * Rtilde/Xtilde and the combined operator Lambda (or D_inv when simple > -1),
 * built by hypre_CreateLambda / hypre_CreateDinv below.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGAdditiveCycle( void *amg_vdata)
{
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata;

   /* Data Structure variables */
   hypre_ParCSRMatrix **A_array;
   hypre_ParCSRMatrix **P_array;
   hypre_ParCSRMatrix **R_array;
   hypre_ParCSRMatrix *Lambda;
   hypre_ParCSRMatrix *Atilde;
   hypre_ParVector **F_array;
   hypre_ParVector **U_array;
   hypre_ParVector *Vtemp;
   hypre_ParVector *Ztemp;
   hypre_ParVector *Xtilde, *Rtilde;
   HYPRE_Int **CF_marker_array;
   HYPRE_Int num_levels;
   HYPRE_Int addlvl, add_end;
   HYPRE_Int additive;
   HYPRE_Int mult_additive;
   HYPRE_Int simple;
   HYPRE_Int add_last_lvl;
   HYPRE_Int i, j, num_rows;
   HYPRE_Int n_global;
   HYPRE_Int rlx_order;

   /* Local variables */
   HYPRE_Int Solve_err_flag = 0;
   HYPRE_Int level;
   HYPRE_Int coarse_grid;
   HYPRE_Int fine_grid;
   HYPRE_Int rlx_down;
   HYPRE_Int rlx_up;
   HYPRE_Int rlx_coarse;
   HYPRE_Int *grid_relax_type;
   HYPRE_Int *num_grid_sweeps;
   HYPRE_Real **l1_norms;
   HYPRE_Real alpha, beta;
   HYPRE_Real *u_data;
   HYPRE_Real *v_data;
   HYPRE_Real *l1_norms_lvl;
   HYPRE_Real *D_inv;
   HYPRE_Real *x_global;
   HYPRE_Real *r_global;
   HYPRE_Real *relax_weight;
   HYPRE_Real *omega;
#if 0
   HYPRE_Real *D_mat;
   HYPRE_Real *S_vec;
#endif

   /* Acquire data and allocate storage */
   A_array = hypre_ParAMGDataAArray(amg_data);
   F_array = hypre_ParAMGDataFArray(amg_data);
   U_array = hypre_ParAMGDataUArray(amg_data);
   P_array = hypre_ParAMGDataPArray(amg_data);
   R_array = hypre_ParAMGDataRArray(amg_data);
   CF_marker_array = hypre_ParAMGDataCFMarkerArray(amg_data);
   Vtemp = hypre_ParAMGDataVtemp(amg_data);
   Ztemp = hypre_ParAMGDataZtemp(amg_data);
   num_levels = hypre_ParAMGDataNumLevels(amg_data);
   additive = hypre_ParAMGDataAdditive(amg_data);
   mult_additive = hypre_ParAMGDataMultAdditive(amg_data);
   simple = hypre_ParAMGDataSimple(amg_data);
   add_last_lvl = hypre_ParAMGDataAddLastLvl(amg_data);
   grid_relax_type = hypre_ParAMGDataGridRelaxType(amg_data);
   Lambda = hypre_ParAMGDataLambda(amg_data);
   Atilde = hypre_ParAMGDataAtilde(amg_data);
   Xtilde = hypre_ParAMGDataXtilde(amg_data);
   Rtilde = hypre_ParAMGDataRtilde(amg_data);
   l1_norms = hypre_ParAMGDataL1Norms(amg_data);
   D_inv = hypre_ParAMGDataDinv(amg_data);
   relax_weight = hypre_ParAMGDataRelaxWeight(amg_data);
   omega = hypre_ParAMGDataOmega(amg_data);
   rlx_order = hypre_ParAMGDataRelaxOrder(amg_data);
   num_grid_sweeps = hypre_ParAMGDataNumGridSweeps(amg_data);

   /* Initialize: first additive level is the max of the three variant
      settings (the unused variants are -1); add_end defaults to the
      coarsest level */
   addlvl = hypre_max(additive, mult_additive);
   addlvl = hypre_max(addlvl, simple);
   if (add_last_lvl == -1 ) add_end = num_levels-1;
   else add_end = add_last_lvl;
   Solve_err_flag = 0;

   /*---------------------------------------------------------------------
    * Main loop of cycling --- multiplicative version --- V-cycle
    *--------------------------------------------------------------------*/

   /* down cycle */
   rlx_down = grid_relax_type[1];
   rlx_up = grid_relax_type[2];
   rlx_coarse = grid_relax_type[3];
   for (level = 0; level < num_levels-1; level++)
   {
      fine_grid = level;
      coarse_grid = level + 1;

      u_data = hypre_VectorData(hypre_ParVectorLocalVector(U_array[fine_grid]));
      v_data = hypre_VectorData(hypre_ParVectorLocalVector(Vtemp));
      l1_norms_lvl = l1_norms[level];

      hypre_ParVectorSetConstantValues(U_array[coarse_grid], 0.0);

      if (level < addlvl || level > add_end) /* multiplicative version */
      {
         /* smoothing step */
         if (rlx_down == 0)
         {
            /* weighted Jacobi using the diagonal of A */
            HYPRE_Real *A_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
            HYPRE_Int *A_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
            num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
            for (j=0; j < num_grid_sweeps[1]; j++)
            {
               hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
               for (i = 0; i < num_rows; i++)
                  u_data[i] = relax_weight[level]*v_data[i] / A_data[A_i[i]];
            }
         }
         else if (rlx_down != 18)
         {
            /*hypre_BoomerAMGRelax(A_array[fine_grid],F_array[fine_grid],NULL,rlx_down,0,*/
            for (j=0; j < num_grid_sweeps[1]; j++)
            {
               hypre_BoomerAMGRelaxIF(A_array[fine_grid],F_array[fine_grid],
                     CF_marker_array[fine_grid], rlx_down,rlx_order,1,
                     relax_weight[fine_grid], omega[fine_grid],
                     l1_norms[level], U_array[fine_grid], Vtemp, Ztemp);
               hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
            }
         }
         else
         {
            /* rlx_down == 18: l1-scaled Jacobi */
            num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
            for (j=0; j < num_grid_sweeps[1]; j++)
            {
               hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
               for (i = 0; i < num_rows; i++)
                  u_data[i] += v_data[i] / l1_norms_lvl[i];
            }
         }
         /* residual: Vtemp = F - A*U, then restrict: F_coarse = R'*Vtemp */
         alpha = -1.0;
         beta = 1.0;
         hypre_ParCSRMatrixMatvec(alpha, A_array[fine_grid], U_array[fine_grid],
                                  beta, Vtemp);
         alpha = 1.0;
         beta = 0.0;
         hypre_ParCSRMatrixMatvecT(alpha,R_array[fine_grid],Vtemp,
                                   beta,F_array[coarse_grid]);
      }
      else /* additive version */
      {
         hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
         if (level == 0) /* compute residual */
         {
            hypre_ParVectorCopy(Vtemp, Rtilde);
            hypre_ParVectorCopy(U_array[fine_grid],Xtilde);
         }
         alpha = 1.0;
         beta = 0.0;
         hypre_ParCSRMatrixMatvecT(alpha,R_array[fine_grid],Vtemp,
                                   beta,F_array[coarse_grid]);
      }
   }

   /* additive smoothing and solve coarse grid */
   if (addlvl < num_levels)
   {
      if (simple > -1)
      {
         /* "simple" variant: Xtilde += D_inv .* Rtilde across all additive
            levels at once */
         x_global = hypre_VectorData(hypre_ParVectorLocalVector(Xtilde));
         r_global = hypre_VectorData(hypre_ParVectorLocalVector(Rtilde));
         n_global = hypre_VectorSize(hypre_ParVectorLocalVector(Xtilde));
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i=0; i < n_global; i++)
            x_global[i] += D_inv[i]*r_global[i];
      }
      else
      {
         if (num_grid_sweeps[1] > 1)
         {
            /* two-sweep smoothing: Rtilde = 2*Rtilde - Atilde*(Lambda*Rtilde) */
            n_global = hypre_VectorSize(hypre_ParVectorLocalVector(Rtilde));
            hypre_ParVector *Tmptilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST);
            hypre_Vector *Tmptilde_local = hypre_SeqVectorCreate(n_global);
            hypre_SeqVectorInitialize(Tmptilde_local);
            hypre_ParVectorLocalVector(Tmptilde) = Tmptilde_local;
            hypre_ParVectorOwnsData(Tmptilde) = 1;
            hypre_ParCSRMatrixMatvec(1.0, Lambda, Rtilde, 0.0, Tmptilde);
            hypre_ParVectorScale(2.0,Rtilde);
            hypre_ParCSRMatrixMatvec(-1.0, Atilde, Tmptilde, 1.0, Rtilde);
            hypre_ParVectorDestroy(Tmptilde);
         }
         hypre_ParCSRMatrixMatvec(1.0, Lambda, Rtilde, 1.0, Xtilde);
      }
      if (addlvl == 0) hypre_ParVectorCopy(Xtilde, U_array[0]);
   }
   if (add_end < num_levels -1)
   {
      /* coarsest level handled multiplicatively */
      fine_grid = num_levels -1;
      for (j=0; j < num_grid_sweeps[3]; j++)
         if (rlx_coarse == 18)
            hypre_ParCSRRelax(A_array[fine_grid], F_array[fine_grid],
                  1, 1, l1_norms[fine_grid],
                  1.0, 1.0 ,0,0,0,0, U_array[fine_grid], Vtemp, Ztemp);
         else
            hypre_BoomerAMGRelaxIF(A_array[fine_grid],F_array[fine_grid],
                  NULL, rlx_coarse,0,0,
                  relax_weight[fine_grid], omega[fine_grid],
                  l1_norms[fine_grid], U_array[fine_grid], Vtemp, Ztemp);
   }

   /* up cycle */
   for (level = num_levels-1; level > 0; level--)
   {
      fine_grid = level - 1;
      coarse_grid = level;

      if (level <= addlvl || level > add_end+1) /* multiplicative version */
      {
         /* interpolate: U_fine += P * U_coarse, then post-smooth */
         alpha = 1.0;
         beta = 1.0;
         hypre_ParCSRMatrixMatvec(alpha, P_array[fine_grid],
                                  U_array[coarse_grid],
                                  beta, U_array[fine_grid]);
         if (rlx_up != 18)
            /*hypre_BoomerAMGRelax(A_array[fine_grid],F_array[fine_grid],NULL,rlx_up,0,*/
            for (j=0; j < num_grid_sweeps[2]; j++)
               hypre_BoomerAMGRelaxIF(A_array[fine_grid],F_array[fine_grid],
                     CF_marker_array[fine_grid],
                     rlx_up,rlx_order,2,
                     relax_weight[fine_grid], omega[fine_grid],
                     l1_norms[fine_grid], U_array[fine_grid], Vtemp, Ztemp);
         else if (rlx_order)
         {
            /* CF-ordered l1-Jacobi: C points (-1) then F points (1) */
            HYPRE_Int loc_relax_points[2];
            loc_relax_points[0] = -1;
            loc_relax_points[1] = 1;
            for (j=0; j < num_grid_sweeps[2]; j++)
               for (i=0; i < 2; i++)
                  hypre_ParCSRRelax_L1_Jacobi(A_array[fine_grid],F_array[fine_grid],
                        CF_marker_array[fine_grid],
                        loc_relax_points[i],
                        1.0, l1_norms[fine_grid],
                        U_array[fine_grid], Vtemp);
         }
         else
            for (j=0; j < num_grid_sweeps[2]; j++)
               hypre_ParCSRRelax(A_array[fine_grid], F_array[fine_grid],
                     1, 1, l1_norms[fine_grid],
                     1.0, 1.0 ,0,0,0,0, U_array[fine_grid], Vtemp, Ztemp);
      }
      else /* additive version */
      {
         /* interpolation only; smoothing was done through Lambda/D_inv */
         alpha = 1.0;
         beta = 1.0;
         hypre_ParCSRMatrixMatvec(alpha, P_array[fine_grid],
                                  U_array[coarse_grid],
                                  beta, U_array[fine_grid]);
      }
   }

   return(Solve_err_flag);
}

/*--------------------------------------------------------------------------
 * hypre_CreateLambda
 *
 * Builds the combined smoothing operator Lambda (and, when ns > 1, the
 * combined system matrix Atilde) spanning all additive levels, together
 * with a merged communication package and the combined vectors Rtilde and
 * Xtilde whose per-level segments alias the data of F_array and U_array.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_CreateLambda(void *amg_vdata)
{
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata;

   /* Data Structure variables */
   MPI_Comm comm;
   hypre_ParCSRMatrix **A_array;
   hypre_ParVector **F_array;
   hypre_ParVector **U_array;
   hypre_ParCSRMatrix *A_tmp;
   hypre_ParCSRMatrix *Lambda;
   hypre_CSRMatrix *L_diag;
   hypre_CSRMatrix *L_offd;
   hypre_ParCSRMatrix *Atilde;
   hypre_CSRMatrix *Atilde_diag;
   hypre_CSRMatrix *Atilde_offd;
   HYPRE_Real *Atilde_diag_data;
   HYPRE_Real *Atilde_offd_data;
   hypre_CSRMatrix *A_tmp_diag;
   hypre_CSRMatrix *A_tmp_offd;
   hypre_ParVector *Xtilde;
   hypre_ParVector *Rtilde;
   hypre_Vector *Xtilde_local;
   hypre_Vector *Rtilde_local;
   hypre_ParCSRCommPkg *comm_pkg;
   hypre_ParCSRCommPkg *L_comm_pkg = NULL;
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_Real *L_diag_data;
   HYPRE_Real *L_offd_data;
   HYPRE_Real *buf_data = NULL;
   HYPRE_Real *tmp_data;
   HYPRE_Real
*x_data; HYPRE_Real *r_data; HYPRE_Real *l1_norms; HYPRE_Real *A_tmp_diag_data; HYPRE_Real *A_tmp_offd_data; HYPRE_Real *D_data = NULL; HYPRE_Real *D_data_offd = NULL; HYPRE_Int *L_diag_i; HYPRE_Int *L_diag_j; HYPRE_Int *L_offd_i; HYPRE_Int *L_offd_j; HYPRE_Int *Atilde_diag_i; HYPRE_Int *Atilde_diag_j; HYPRE_Int *Atilde_offd_i; HYPRE_Int *Atilde_offd_j; HYPRE_Int *A_tmp_diag_i; HYPRE_Int *A_tmp_offd_i; HYPRE_Int *A_tmp_diag_j; HYPRE_Int *A_tmp_offd_j; HYPRE_Int *L_recv_ptr = NULL; HYPRE_Int *L_send_ptr = NULL; HYPRE_Int *L_recv_procs = NULL; HYPRE_Int *L_send_procs = NULL; HYPRE_Int *L_send_map_elmts = NULL; HYPRE_Int *recv_procs; HYPRE_Int *send_procs; HYPRE_Int *send_map_elmts; HYPRE_Int *send_map_starts; HYPRE_Int *recv_vec_starts; HYPRE_Int *all_send_procs = NULL; HYPRE_Int *all_recv_procs = NULL; HYPRE_Int *remap = NULL; HYPRE_Int *level_start; HYPRE_Int addlvl; HYPRE_Int additive; HYPRE_Int mult_additive; HYPRE_Int num_levels; HYPRE_Int num_add_lvls; HYPRE_Int num_procs; HYPRE_Int num_sends, num_recvs; HYPRE_Int num_sends_L = 0; HYPRE_Int num_recvs_L = 0; HYPRE_Int send_data_L = 0; HYPRE_Int num_rows_L = 0; HYPRE_Int num_rows_tmp = 0; HYPRE_Int num_cols_offd_L = 0; HYPRE_Int num_cols_offd = 0; HYPRE_Int level, i, j, k; HYPRE_Int this_proc, cnt, cnt_diag, cnt_offd; HYPRE_Int A_cnt_diag, A_cnt_offd; HYPRE_Int cnt_recv, cnt_send, cnt_row, row_start; HYPRE_Int start_diag, start_offd, indx, cnt_map; HYPRE_Int start, j_indx, index, cnt_level; HYPRE_Int max_sends, max_recvs; HYPRE_Int ns; /* Local variables */ HYPRE_Int Solve_err_flag = 0; HYPRE_Int num_nonzeros_diag; HYPRE_Int num_nonzeros_offd; HYPRE_Real **l1_norms_ptr = NULL; /*HYPRE_Real *relax_weight = NULL; HYPRE_Int relax_type; */ HYPRE_Int add_rlx; HYPRE_Int add_last_lvl, add_end; HYPRE_Real add_rlx_wt; /* Acquire data and allocate storage */ A_array = hypre_ParAMGDataAArray(amg_data); F_array = hypre_ParAMGDataFArray(amg_data); U_array = hypre_ParAMGDataUArray(amg_data); additive = 
hypre_ParAMGDataAdditive(amg_data); mult_additive = hypre_ParAMGDataMultAdditive(amg_data); add_last_lvl = hypre_ParAMGDataAddLastLvl(amg_data); num_levels = hypre_ParAMGDataNumLevels(amg_data); /*relax_weight = hypre_ParAMGDataRelaxWeight(amg_data); relax_type = hypre_ParAMGDataGridRelaxType(amg_data)[1];*/ comm = hypre_ParCSRMatrixComm(A_array[0]); add_rlx = hypre_ParAMGDataAddRelaxType(amg_data); add_rlx_wt = hypre_ParAMGDataAddRelaxWt(amg_data); ns = hypre_ParAMGDataNumGridSweeps(amg_data)[1]; hypre_MPI_Comm_size(comm,&num_procs); l1_norms_ptr = hypre_ParAMGDataL1Norms(amg_data); addlvl = hypre_max(additive, mult_additive); if (add_last_lvl != -1) add_end = add_last_lvl+1; else add_end = num_levels; num_add_lvls = add_end+1-addlvl; level_start = hypre_CTAlloc(HYPRE_Int, num_add_lvls+1, HYPRE_MEMORY_HOST); send_data_L = 0; num_rows_L = 0; num_cols_offd_L = 0; num_nonzeros_diag = 0; num_nonzeros_offd = 0; level_start[0] = 0; cnt = 1; max_sends = 0; max_recvs = 0; for (i=addlvl; i < add_end; i++) { A_tmp = A_array[i]; A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp); A_tmp_offd = hypre_ParCSRMatrixOffd(A_tmp); A_tmp_diag_i = hypre_CSRMatrixI(A_tmp_diag); A_tmp_offd_i = hypre_CSRMatrixI(A_tmp_offd); num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag); num_cols_offd = hypre_CSRMatrixNumCols(A_tmp_offd); num_rows_L += num_rows_tmp; level_start[cnt] = level_start[cnt-1] + num_rows_tmp; cnt++; num_cols_offd_L += num_cols_offd; num_nonzeros_diag += A_tmp_diag_i[num_rows_tmp]; num_nonzeros_offd += A_tmp_offd_i[num_rows_tmp]; comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp); if (comm_pkg) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); max_sends += num_sends; if (num_sends) send_data_L += hypre_ParCSRCommPkgSendMapStart(comm_pkg,num_sends); max_recvs += hypre_ParCSRCommPkgNumRecvs(comm_pkg); } } if (max_sends >= num_procs ||max_recvs >= num_procs) { max_sends = num_procs; max_recvs = num_procs; } if (max_sends) all_send_procs = hypre_CTAlloc(HYPRE_Int, max_sends, 
HYPRE_MEMORY_HOST); if (max_recvs) all_recv_procs = hypre_CTAlloc(HYPRE_Int, max_recvs, HYPRE_MEMORY_HOST); cnt_send = 0; cnt_recv = 0; if (max_sends || max_recvs) { if (max_sends < num_procs && max_recvs < num_procs) { for (i=addlvl; i < add_end; i++) { A_tmp = A_array[i]; comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp); if (comm_pkg) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); for (j = 0; j < num_sends; j++) all_send_procs[cnt_send++] = send_procs[j]; for (j = 0; j < num_recvs; j++) all_recv_procs[cnt_recv++] = recv_procs[j]; } } if (max_sends) { hypre_qsort0(all_send_procs, 0, max_sends-1); num_sends_L = 1; this_proc = all_send_procs[0]; for (i=1; i < max_sends; i++) { if (all_send_procs[i] > this_proc) { this_proc = all_send_procs[i]; all_send_procs[num_sends_L++] = this_proc; } } L_send_procs = hypre_CTAlloc(HYPRE_Int, num_sends_L, HYPRE_MEMORY_HOST); for (j=0; j < num_sends_L; j++) L_send_procs[j] = all_send_procs[j]; hypre_TFree(all_send_procs, HYPRE_MEMORY_HOST); } if (max_recvs) { hypre_qsort0(all_recv_procs, 0, max_recvs-1); num_recvs_L = 1; this_proc = all_recv_procs[0]; for (i=1; i < max_recvs; i++) { if (all_recv_procs[i] > this_proc) { this_proc = all_recv_procs[i]; all_recv_procs[num_recvs_L++] = this_proc; } } L_recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs_L, HYPRE_MEMORY_HOST); for (j=0; j < num_recvs_L; j++) L_recv_procs[j] = all_recv_procs[j]; hypre_TFree(all_recv_procs, HYPRE_MEMORY_HOST); } L_recv_ptr = hypre_CTAlloc(HYPRE_Int, num_recvs_L+1, HYPRE_MEMORY_HOST); L_send_ptr = hypre_CTAlloc(HYPRE_Int, num_sends_L+1, HYPRE_MEMORY_HOST); for (i=addlvl; i < add_end; i++) { A_tmp = A_array[i]; comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp); if (comm_pkg) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); send_procs = 
hypre_ParCSRCommPkgSendProcs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); } else { num_sends = 0; num_recvs = 0; } for (k = 0; k < num_sends; k++) { this_proc = hypre_BinarySearch(L_send_procs,send_procs[k],num_sends_L); L_send_ptr[this_proc+1] += send_map_starts[k+1]-send_map_starts[k]; } for (k = 0; k < num_recvs; k++) { this_proc = hypre_BinarySearch(L_recv_procs,recv_procs[k],num_recvs_L); L_recv_ptr[this_proc+1] += recv_vec_starts[k+1]-recv_vec_starts[k]; } } L_recv_ptr[0] = 0; for (i=1; i < num_recvs_L; i++) L_recv_ptr[i+1] += L_recv_ptr[i]; L_send_ptr[0] = 0; for (i=1; i < num_sends_L; i++) L_send_ptr[i+1] += L_send_ptr[i]; } else { num_recvs_L = 0; num_sends_L = 0; for (i=addlvl; i < add_end; i++) { A_tmp = A_array[i]; comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp); if (comm_pkg) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); for (j = 0; j < num_sends; j++) { this_proc = send_procs[j]; if (all_send_procs[this_proc] == 0) num_sends_L++; all_send_procs[this_proc] += send_map_starts[j+1]-send_map_starts[j]; } for (j = 0; j < num_recvs; j++) { this_proc = recv_procs[j]; if (all_recv_procs[this_proc] == 0) num_recvs_L++; all_recv_procs[this_proc] += recv_vec_starts[j+1]-recv_vec_starts[j]; } } } if (max_sends) { L_send_procs = hypre_CTAlloc(HYPRE_Int, num_sends_L, HYPRE_MEMORY_HOST); L_send_ptr = hypre_CTAlloc(HYPRE_Int, num_sends_L+1, HYPRE_MEMORY_HOST); num_sends_L = 0; for (j=0; j < num_procs; j++) { this_proc = all_send_procs[j]; if (this_proc) { L_send_procs[num_sends_L++] = j; L_send_ptr[num_sends_L] = this_proc + 
L_send_ptr[num_sends_L-1]; } } } if (max_recvs) { L_recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs_L, HYPRE_MEMORY_HOST); L_recv_ptr = hypre_CTAlloc(HYPRE_Int, num_recvs_L+1, HYPRE_MEMORY_HOST); num_recvs_L = 0; for (j=0; j < num_procs; j++) { this_proc = all_recv_procs[j]; if (this_proc) { L_recv_procs[num_recvs_L++] = j; L_recv_ptr[num_recvs_L] = this_proc + L_recv_ptr[num_recvs_L-1]; } } } } } if (max_sends) hypre_TFree(all_send_procs, HYPRE_MEMORY_HOST); if (max_recvs) hypre_TFree(all_recv_procs, HYPRE_MEMORY_HOST); L_diag = hypre_CSRMatrixCreate(num_rows_L, num_rows_L, num_nonzeros_diag); L_offd = hypre_CSRMatrixCreate(num_rows_L, num_cols_offd_L, num_nonzeros_offd); hypre_CSRMatrixInitialize(L_diag); hypre_CSRMatrixInitialize(L_offd); if (num_nonzeros_diag) { L_diag_data = hypre_CSRMatrixData(L_diag); L_diag_j = hypre_CSRMatrixJ(L_diag); } L_diag_i = hypre_CSRMatrixI(L_diag); if (num_nonzeros_offd) { L_offd_data = hypre_CSRMatrixData(L_offd); L_offd_j = hypre_CSRMatrixJ(L_offd); } L_offd_i = hypre_CSRMatrixI(L_offd); if (ns > 1) { Atilde_diag = hypre_CSRMatrixCreate(num_rows_L, num_rows_L, num_nonzeros_diag); Atilde_offd = hypre_CSRMatrixCreate(num_rows_L, num_cols_offd_L, num_nonzeros_offd); hypre_CSRMatrixInitialize(Atilde_diag); hypre_CSRMatrixInitialize(Atilde_offd); if (num_nonzeros_diag) { Atilde_diag_data = hypre_CSRMatrixData(Atilde_diag); Atilde_diag_j = hypre_CSRMatrixJ(Atilde_diag); } Atilde_diag_i = hypre_CSRMatrixI(Atilde_diag); if (num_nonzeros_offd) { Atilde_offd_data = hypre_CSRMatrixData(Atilde_offd); Atilde_offd_j = hypre_CSRMatrixJ(Atilde_offd); } Atilde_offd_i = hypre_CSRMatrixI(Atilde_offd); } if (num_rows_L) D_data = hypre_CTAlloc(HYPRE_Real, num_rows_L, HYPRE_MEMORY_HOST); if (send_data_L) { L_send_map_elmts = hypre_CTAlloc(HYPRE_Int, send_data_L, HYPRE_MEMORY_SHARED); buf_data = hypre_CTAlloc(HYPRE_Real, send_data_L, HYPRE_MEMORY_HOST); } if (num_cols_offd_L) { D_data_offd = hypre_CTAlloc(HYPRE_Real, num_cols_offd_L, 
HYPRE_MEMORY_HOST); /*L_col_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_L);*/ remap = hypre_CTAlloc(HYPRE_Int, num_cols_offd_L, HYPRE_MEMORY_HOST); } Rtilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST); Rtilde_local = hypre_SeqVectorCreate(num_rows_L); hypre_SeqVectorInitialize(Rtilde_local); hypre_ParVectorLocalVector(Rtilde) = Rtilde_local; hypre_ParVectorOwnsData(Rtilde) = 1; Xtilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST); Xtilde_local = hypre_SeqVectorCreate(num_rows_L); hypre_SeqVectorInitialize(Xtilde_local); hypre_ParVectorLocalVector(Xtilde) = Xtilde_local; hypre_ParVectorOwnsData(Xtilde) = 1; x_data = hypre_VectorData(hypre_ParVectorLocalVector(Xtilde)); r_data = hypre_VectorData(hypre_ParVectorLocalVector(Rtilde)); cnt = 0; cnt_level = 0; cnt_diag = 0; cnt_offd = 0; cnt_row = 1; L_diag_i[0] = 0; L_offd_i[0] = 0; if (ns > 1) { A_cnt_diag = 0; A_cnt_offd = 0; Atilde_diag_i[0] = 0; Atilde_offd_i[0] = 0; } for (level=addlvl; level < add_end; level++) { row_start = level_start[cnt_level]; if (level != 0) { tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(F_array[level])); if (tmp_data) hypre_TFree(tmp_data, HYPRE_MEMORY_SHARED); hypre_VectorData(hypre_ParVectorLocalVector(F_array[level])) = &r_data[row_start]; hypre_VectorOwnsData(hypre_ParVectorLocalVector(F_array[level])) = 0; tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(U_array[level])); if (tmp_data) hypre_TFree(tmp_data, HYPRE_MEMORY_SHARED); hypre_VectorData(hypre_ParVectorLocalVector(U_array[level])) = &x_data[row_start]; hypre_VectorOwnsData(hypre_ParVectorLocalVector(U_array[level])) = 0; } cnt_level++; start_diag = L_diag_i[cnt_row-1]; start_offd = L_offd_i[cnt_row-1]; A_tmp = A_array[level]; A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp); A_tmp_offd = hypre_ParCSRMatrixOffd(A_tmp); comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp); A_tmp_diag_i = hypre_CSRMatrixI(A_tmp_diag); A_tmp_offd_i = hypre_CSRMatrixI(A_tmp_offd); A_tmp_diag_j = 
hypre_CSRMatrixJ(A_tmp_diag); A_tmp_offd_j = hypre_CSRMatrixJ(A_tmp_offd); A_tmp_diag_data = hypre_CSRMatrixData(A_tmp_diag); A_tmp_offd_data = hypre_CSRMatrixData(A_tmp_offd); num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag); if (comm_pkg) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); } else { num_sends = 0; num_recvs = 0; } /* Compute new combined communication package */ for (i=0; i < num_sends; i++) { this_proc = hypre_BinarySearch(L_send_procs,send_procs[i],num_sends_L); indx = L_send_ptr[this_proc]; for (j=send_map_starts[i]; j < send_map_starts[i+1]; j++) { L_send_map_elmts[indx++] = row_start + send_map_elmts[j]; } L_send_ptr[this_proc] = indx; } cnt_map = 0; for (i = 0; i < num_recvs; i++) { this_proc = hypre_BinarySearch(L_recv_procs,recv_procs[i],num_recvs_L); indx = L_recv_ptr[this_proc]; for (j=recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++) { remap[cnt_map++] = indx++; } L_recv_ptr[this_proc] = indx; } /* Compute Lambda */ if (add_rlx == 0) { /*HYPRE_Real rlx_wt = relax_weight[level];*/ #ifdef HYPRE_USING_OPENMP #pragma omp for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < num_rows_tmp; i++) { D_data[i] = add_rlx_wt/A_tmp_diag_data[A_tmp_diag_i[i]]; L_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1]; L_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1]; } if (ns > 1) for (i=0; i < num_rows_tmp; i++) { Atilde_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1]; Atilde_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1]; } } else { l1_norms = l1_norms_ptr[level]; #ifdef HYPRE_USING_OPENMP #pragma omp for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < num_rows_tmp; i++) { D_data[i] = 1.0/l1_norms[i]; 
L_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1]; L_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1]; } if (ns > 1) for (i=0; i < num_rows_tmp; i++) { Atilde_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1]; Atilde_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1]; } } if (num_procs > 1) { index = 0; for (i=0; i < num_sends; i++) { start = send_map_starts[i]; for (j=start; j < send_map_starts[i+1]; j++) buf_data[index++] = D_data[send_map_elmts[j]]; } comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, buf_data, D_data_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } for (i = 0; i < num_rows_tmp; i++) { j_indx = A_tmp_diag_i[i]; if (ns > 1) { Atilde_diag_data[A_cnt_diag] = A_tmp_diag_data[j_indx]; Atilde_diag_j[A_cnt_diag++] = i+row_start; } L_diag_data[cnt_diag] = (2.0 - A_tmp_diag_data[j_indx]*D_data[i])*D_data[i]; L_diag_j[cnt_diag++] = i+row_start; for (j=A_tmp_diag_i[i]+1; j < A_tmp_diag_i[i+1]; j++) { j_indx = A_tmp_diag_j[j]; L_diag_data[cnt_diag] = (- A_tmp_diag_data[j]*D_data[j_indx])*D_data[i]; L_diag_j[cnt_diag++] = j_indx+row_start; } for (j=A_tmp_offd_i[i]; j < A_tmp_offd_i[i+1]; j++) { j_indx = A_tmp_offd_j[j]; L_offd_data[cnt_offd] = (- A_tmp_offd_data[j]*D_data_offd[j_indx])*D_data[i]; L_offd_j[cnt_offd++] = remap[j_indx]; } if (ns > 1) { for (j=A_tmp_diag_i[i]+1; j < A_tmp_diag_i[i+1]; j++) { j_indx = A_tmp_diag_j[j]; Atilde_diag_data[A_cnt_diag] = A_tmp_diag_data[j]; Atilde_diag_j[A_cnt_diag++] = j_indx+row_start; } for (j=A_tmp_offd_i[i]; j < A_tmp_offd_i[i+1]; j++) { j_indx = A_tmp_offd_j[j]; Atilde_offd_data[A_cnt_offd] = A_tmp_offd_data[j]; Atilde_offd_j[A_cnt_offd++] = remap[j_indx]; } } } cnt_row += num_rows_tmp; } if (L_send_ptr) { for (i=num_sends_L-1; i > 0; i--) L_send_ptr[i] = L_send_ptr[i-1]; L_send_ptr[0] = 0; } else L_send_ptr = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST); if (L_recv_ptr) { for (i=num_recvs_L-1; i > 0; i--) L_recv_ptr[i] = L_recv_ptr[i-1]; L_recv_ptr[0] = 0; } else L_recv_ptr = 
hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST); L_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgNumRecvs(L_comm_pkg) = num_recvs_L; hypre_ParCSRCommPkgNumSends(L_comm_pkg) = num_sends_L; hypre_ParCSRCommPkgRecvProcs(L_comm_pkg) = L_recv_procs; hypre_ParCSRCommPkgSendProcs(L_comm_pkg) = L_send_procs; hypre_ParCSRCommPkgRecvVecStarts(L_comm_pkg) = L_recv_ptr; hypre_ParCSRCommPkgSendMapStarts(L_comm_pkg) = L_send_ptr; hypre_ParCSRCommPkgSendMapElmts(L_comm_pkg) = L_send_map_elmts; hypre_ParCSRCommPkgComm(L_comm_pkg) = comm; Lambda = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDiag(Lambda) = L_diag; hypre_ParCSRMatrixOffd(Lambda) = L_offd; hypre_ParCSRMatrixCommPkg(Lambda) = L_comm_pkg; hypre_ParCSRMatrixComm(Lambda) = comm; hypre_ParCSRMatrixOwnsData(Lambda) = 1; if (ns > 1) { /*hypre_ParCSRCommPkg *A_comm_pkg = NULL; HYPRE_Int *A_recv_ptr = NULL; HYPRE_Int *A_send_ptr = NULL; HYPRE_Int *A_recv_procs = NULL; HYPRE_Int *A_send_procs = NULL; HYPRE_Int *A_send_map_elmts = NULL; A_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); A_recv_ptr = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST); A_send_ptr = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST); A_recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs_L, HYPRE_MEMORY_HOST); A_send_procs = hypre_CTAlloc(HYPRE_Int, num_sends_L, HYPRE_MEMORY_HOST); A_send_map_elmts = hypre_CTAlloc(HYPRE_Int, L_send_ptr[num_sends_L], HYPRE_MEMORY_HOST); for (i=0; i<num_recvs_L+1; i++) A_recv_ptr[i] = L_recv_ptr[i]; for (i=0; i<num_sends_L+1; i++) A_send_ptr[i] = L_send_ptr[i]; for (i=0; i<num_recvs_L; i++) A_recv_procs[i] = L_recv_procs[i]; for (i=0; i<num_sends_L; i++) A_send_procs[i] = L_send_procs[i]; for (i=0; i < L_send_ptr[num_sends_L]; i++) A_send_map_elmts[i] = L_send_map_elmts[i]; hypre_ParCSRCommPkgNumRecvs(A_comm_pkg) = num_recvs_L; hypre_ParCSRCommPkgNumSends(A_comm_pkg) = num_sends_L; 
hypre_ParCSRCommPkgRecvProcs(A_comm_pkg) = A_recv_procs;
   hypre_ParCSRCommPkgSendProcs(A_comm_pkg) = A_send_procs;
   hypre_ParCSRCommPkgRecvVecStarts(A_comm_pkg) = A_recv_ptr;
   hypre_ParCSRCommPkgSendMapStarts(A_comm_pkg) = A_send_ptr;
   hypre_ParCSRCommPkgSendMapElmts(A_comm_pkg) = A_send_map_elmts;
   hypre_ParCSRCommPkgComm(A_comm_pkg) = comm; */
      /* Atilde reuses L_comm_pkg (the commented-out code above would have
         built a private copy of the communication package instead). */
      Atilde = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixDiag(Atilde) = Atilde_diag;
      hypre_ParCSRMatrixOffd(Atilde) = Atilde_offd;
      hypre_ParCSRMatrixCommPkg(Atilde) = L_comm_pkg;
      hypre_ParCSRMatrixComm(Atilde) = comm;
      hypre_ParCSRMatrixOwnsData(Atilde) = 1;
      hypre_ParAMGDataAtilde(amg_data) = Atilde;
   }
   hypre_ParAMGDataLambda(amg_data) = Lambda;
   hypre_ParAMGDataRtilde(amg_data) = Rtilde;
   hypre_ParAMGDataXtilde(amg_data) = Xtilde;
   /* release scratch arrays used while assembling Lambda */
   hypre_TFree(D_data_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(D_data, HYPRE_MEMORY_HOST);
   if (num_procs > 1) hypre_TFree(buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(remap, HYPRE_MEMORY_HOST);
   /* NOTE(review): buf_data was already freed above when num_procs > 1;
      this second call is redundant (hypre_TFree NULLs the pointer, so it is
      presumably harmless) — confirm and consider removing. */
   hypre_TFree(buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(level_start, HYPRE_MEMORY_HOST);
   return Solve_err_flag;
}

/* Build the packed inverse-diagonal smoother D_inv used by the "simple"
** additive AMG variant.  For every additive level (addlvl .. add_end-1) the
** per-row value is either add_rlx_wt / a_ii (weighted Jacobi, add_rlx == 0)
** or 1 / l1_norm(row) otherwise, stored contiguously across levels.  Also
** creates the composite vectors Rtilde/Xtilde spanning all additive levels
** and re-points each level's F/U vectors into them.
**
** Returns Solve_err_flag (always 0 here); results are stored in amg_data. */
HYPRE_Int hypre_CreateDinv(void *amg_vdata)
{
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata;

   /* Data Structure variables */
   hypre_ParCSRMatrix **A_array;
   hypre_ParVector **F_array;
   hypre_ParVector **U_array;
   hypre_ParCSRMatrix *A_tmp;
   hypre_CSRMatrix *A_tmp_diag;
   hypre_ParVector *Xtilde;
   hypre_ParVector *Rtilde;
   hypre_Vector *Xtilde_local;
   hypre_Vector *Rtilde_local;
   HYPRE_Real *x_data;
   HYPRE_Real *r_data;
   HYPRE_Real *tmp_data;
   HYPRE_Real *D_inv = NULL;
   /*HYPRE_Real *relax_weight = NULL;
   HYPRE_Real relax_type;*/
   HYPRE_Int addlvl;
   HYPRE_Int num_levels;
   HYPRE_Int num_rows_L;         /* total rows over all additive levels */
   HYPRE_Int num_rows_tmp;
   HYPRE_Int level, i;
   HYPRE_Int add_rlx;            /* 0 => weighted Jacobi, else l1-Jacobi */
   HYPRE_Real add_rlx_wt;
   HYPRE_Int add_last_lvl, add_end;

   /* Local variables */
   HYPRE_Int Solve_err_flag = 0;
   HYPRE_Real **l1_norms_ptr = NULL;
   HYPRE_Real *l1_norms;
   HYPRE_Int l1_start;           /* offset of the current level in D_inv */

   /* Acquire data and allocate storage */
   A_array = hypre_ParAMGDataAArray(amg_data);
   F_array = hypre_ParAMGDataFArray(amg_data);
   U_array = hypre_ParAMGDataUArray(amg_data);
   addlvl = hypre_ParAMGDataSimple(amg_data);
   num_levels = hypre_ParAMGDataNumLevels(amg_data);
   add_rlx_wt = hypre_ParAMGDataAddRelaxWt(amg_data);
   add_rlx = hypre_ParAMGDataAddRelaxType(amg_data);
   add_last_lvl = hypre_ParAMGDataAddLastLvl(amg_data);
   /*relax_weight = hypre_ParAMGDataRelaxWeight(amg_data);
   relax_type = hypre_ParAMGDataGridRelaxType(amg_data)[1];*/
   l1_norms_ptr = hypre_ParAMGDataL1Norms(amg_data);
   /* smooth_option = hypre_ParAMGDataSmoothOption(amg_data); */

   /* -1 means "additive all the way down" */
   if (add_last_lvl == -1 ) add_end = num_levels;
   else add_end = add_last_lvl;

   /* count the total number of local rows over the additive levels */
   num_rows_L = 0;
   for (i=addlvl; i < add_end; i++)
   {
      A_tmp = A_array[i];
      A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp);
      num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag);
      num_rows_L += num_rows_tmp;
   }

   /* composite residual / solution vectors covering all additive levels */
   Rtilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST);
   Rtilde_local = hypre_SeqVectorCreate(num_rows_L);
   hypre_SeqVectorInitialize(Rtilde_local);
   hypre_ParVectorLocalVector(Rtilde) = Rtilde_local;
   hypre_ParVectorOwnsData(Rtilde) = 1;

   Xtilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST);
   Xtilde_local = hypre_SeqVectorCreate(num_rows_L);
   hypre_SeqVectorInitialize(Xtilde_local);
   hypre_ParVectorLocalVector(Xtilde) = Xtilde_local;
   hypre_ParVectorOwnsData(Xtilde) = 1;

   x_data = hypre_VectorData(hypre_ParVectorLocalVector(Xtilde));
   r_data = hypre_VectorData(hypre_ParVectorLocalVector(Rtilde));
   D_inv = hypre_CTAlloc(HYPRE_Real, num_rows_L, HYPRE_MEMORY_HOST);

   l1_start = 0;
   for (level=addlvl; level < add_end; level++)
   {
      if (level != 0)
      {
         /* Alias this level's F/U vectors into the composite r/x storage:
            free the old data (allocated in the shared memory space) and make
            the vectors non-owning views into r_data/x_data. */
         tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(F_array[level]));
         if (tmp_data) hypre_TFree(tmp_data, HYPRE_MEMORY_SHARED);
         hypre_VectorData(hypre_ParVectorLocalVector(F_array[level])) = &r_data[l1_start];
         hypre_VectorOwnsData(hypre_ParVectorLocalVector(F_array[level])) = 0;
         tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(U_array[level]));
         if (tmp_data) hypre_TFree(tmp_data, HYPRE_MEMORY_SHARED);
         hypre_VectorData(hypre_ParVectorLocalVector(U_array[level])) = &x_data[l1_start];
         hypre_VectorOwnsData(hypre_ParVectorLocalVector(U_array[level])) = 0;
      }
      A_tmp = A_array[level];
      A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp);
      num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag);
      if (add_rlx == 0)
      {
         /*HYPRE_Real rlx_wt = relax_weight[level];*/
         /* a_ii is the first entry of each CSR row (hypre stores the
            diagonal first in the diag part) */
         HYPRE_Int *A_tmp_diag_i = hypre_CSRMatrixI(A_tmp_diag);
         HYPRE_Real *A_tmp_diag_data = hypre_CSRMatrixData(A_tmp_diag);
/* NOTE(review): "omp for" without an enclosing "omp parallel" is an orphaned
   worksharing construct and runs sequentially when called from serial code —
   presumably "parallel for" was intended; confirm. */
#ifdef HYPRE_USING_OPENMP
#pragma omp for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i=0; i < num_rows_tmp; i++)
            D_inv[l1_start+i] = add_rlx_wt/A_tmp_diag_data[A_tmp_diag_i[i]];
      }
      else
      {
         l1_norms = l1_norms_ptr[level];
#ifdef HYPRE_USING_OPENMP
#pragma omp for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i=0; i < num_rows_tmp; i++)
            D_inv[l1_start+i] = 1.0/l1_norms[i];
      }
      l1_start += num_rows_tmp;
   }
   hypre_ParAMGDataDinv(amg_data) = D_inv;
   hypre_ParAMGDataRtilde(amg_data) = Rtilde;
   hypre_ParAMGDataXtilde(amg_data) = Xtilde;
   return Solve_err_flag;
}
stereo_tracker.h
#ifndef TRACKER_STEREO_TRACKER_H_
#define TRACKER_STEREO_TRACKER_H_

#include <vector>
#include <unordered_map>

#include <boost/archive/text_oarchive.hpp>
#include <boost/archive/text_iarchive.hpp>
#include <boost/serialization/vector.hpp>

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

#include "../base/types.h"
#include "stereo_tracker_base.h"
#include "debug_helper.h"
#include "../base/helper_opencv.h"
#include "../mono/tracker_base.h"
#include "../../core/image.h"
#include "../../core/types.h"
#include "../detector/feature_detector_base.h"
#include "../../reconstruction/base/stereo_costs.h"

namespace track {

// Stereo feature tracker.  A monocular TrackerBase tracks features in the
// left image; the right-image correspondences are found by NCC matching
// along the same image row (rectified epipolar geometry is assumed —
// TODO confirm), optionally after applying a precomputed deformation field
// to compensate residual rectification error.
class StereoTracker : public StereoTrackerBase {
 public:
  // tracker: left-image mono tracker (kept by reference, not owned).
  // max_disparity / stereo_wsz: disparity search range and NCC window size.
  // ncc_thresh: minimum NCC score for a stereo match to be accepted.
  // estimate_subpixel: enable parabolic subpixel disparity refinement.
  // use_df / deformation_field_path: optional deformation-field correction.
  StereoTracker(TrackerBase& tracker, int max_disparity, int stereo_wsz,
                double ncc_thresh, bool estimate_subpixel, bool use_df,
                const std::string& deformation_field_path);
  virtual void init(const cv::Mat& img_left, const cv::Mat& img_right);
  virtual void track(const cv::Mat& img_left, const cv::Mat& img_right);
  virtual int countFeatures() const;
  virtual FeatureInfo featureLeft(int i) const;
  virtual FeatureInfo featureRight(int i) const;
  virtual void removeTrack(int id);
  virtual int countActiveTracks() const;
  virtual FeatureData getLeftFeatureData(int i);
  virtual FeatureData getRightFeatureData(int i);
  virtual void showTrack(int i) const;

 private:
  friend class boost::serialization::access;
  // When the class Archive corresponds to an output archive, the
  // & operator is defined similar to <<. Likewise, when the class Archive
  // is a type of input archive the & operator is defined similar to >>.
  template<class Archive>
  void serialize(Archive& ar, const unsigned int version) {
    // serialize base class information
    //ar & boost::serialization::base_object<StereoTrackerBase>(*this);
    ar & max_feats_;
    ar & img_size_;
    ar & max_disparity_;
    ar & stereo_wsz_;
    ar & margin_sz_;
    ar & ncc_thresh_;
    ar & estimate_subpixel_;
    ar & age_;
    ar & pts_left_prev_;
    ar & pts_left_curr_;
    ar & pts_right_prev_;
    ar & pts_right_curr_;
  }

  // Computes and inserts NCC descriptors for points that have none yet.
  // NOTE(review): the parameter shadows the member naming convention
  // (trailing underscore on a parameter) — confirm intent.
  void AddMissingDescriptors(const cv::Mat& img, const core::Point& point, int window_size,
                             std::vector<std::pair<bool, core::DescriptorNCC>>& descriptors_rprev_);
  // Searches the same row of descriptors_right for the best NCC match of
  // desc_left; on success writes the (optionally subpixel) match position
  // into right_pt and returns true.
  bool stereo_match_ncc(const core::DescriptorNCC& desc_left,
                        const std::vector<std::pair<bool, core::DescriptorNCC>>& descriptors_right,
                        const core::Point& left_pt, const cv::Mat& img_right,
                        bool debug, core::Point& right_pt);

  // deformation field functions
  void ComputeCellCenters();
  void GetPointCell(const core::Point& pt, int& row, int& col);
  void InterpolateBilinear(const cv::Mat& mat, const int row, const int col,
                           const double x, const double y, double& ival);
  void InterpolateLinear(const double val1, const double val2,
                         const double x, const double size, double& ival);
  void ApplyDeformationField(const cv::Mat& def_x, const cv::Mat& def_y, core::Point& pt);

  track::TrackerBase& tracker_;      // left-image mono tracker (not owned)
  int max_feats_;
  int img_size_;
  int max_disparity_;                // disparity search range (pixels)
  int stereo_wsz_, margin_sz_;       // NCC window size and derived image margin
  double ncc_thresh_;                // acceptance threshold for NCC score
  bool estimate_subpixel_;
  cv::Mat img_lp_, img_rp_, img_lc_, img_rc_;   // prev/curr left/right frames
  // One (valid_flag, descriptor) pair per right-image pixel, row-major.
  std::vector<std::pair<bool, core::DescriptorNCC>> descriptors_rprev_, descriptors_rcurr_;
  //cv::Mat desc_rprev_, desc_rcurr_;
  //std::vector<double> distances_prev_, distances_curr_;
  std::vector<int> age_;             // per-track age in frames
  std::vector<core::Point> pts_left_prev_, pts_left_curr_;
  std::vector<core::Point> pts_right_prev_, pts_right_curr_;
  std::vector<core::Point> df_left_prev_, df_left_curr_;
  std::vector<core::Point> df_right_prev_, df_right_curr_;
  bool use_deformation_field_ = false;
  cv::Mat left_dx_, left_dy_;        // deformation-field x/y components, left cam
  cv::Mat right_dx_, right_dy_;      // deformation-field x/y components, right cam
  cv::Mat cell_centers_x_;
cv::Mat cell_centers_y_; int img_rows_; int img_cols_; int cell_width_, cell_height_; }; inline FeatureData StereoTracker::getLeftFeatureData(int i) { FeatureData fdata = tracker_.getFeatureData(i); return fdata; } inline FeatureData StereoTracker::getRightFeatureData(int i) { FeatureData fdata; fdata.feat_ = featureRight(i); //fdata.desc_prev_ = desc_rprev_.row(i).reshape(1, stereo_wsz_); //fdata.desc_curr_ = desc_rcurr_.row(i).reshape(1, stereo_wsz_); return fdata; } inline bool StereoTracker::stereo_match_ncc(const core::DescriptorNCC& desc_left, const std::vector<std::pair<bool, core::DescriptorNCC>>& descriptors_right, const core::Point& left_pt, const cv::Mat& img_right, bool debug, core::Point& right_pt) { bool success = false; std::vector<double> costs; costs.assign(max_disparity_, std::numeric_limits<double>::max()); int x = static_cast<int>(left_pt.x_); int y = static_cast<int>(left_pt.y_); //int min_x = std::max(margin_sz_, int(left_pt.x_) - max_disparity_); int max_disp = std::min(max_disparity_, static_cast<int>(left_pt.x_) - margin_sz_); int best_d = -1; double best_cost = 0.0; //for (; x >= min_x; x--, d++) { int row_start = y * img_right.cols; #pragma omp parallel for for (int d = 0; d <= max_disp; d++) { int key = row_start + x - d; assert(descriptors_right[key].first == true); const core::DescriptorNCC& desc_right = descriptors_right[key].second; costs[d] = recon::StereoCosts::get_cost_NCC(desc_left, desc_right); if (debug) { printf("d = %d\nNCC = %f\n\b", d, costs[d]); HelperOpencv::DrawPoint(core::Point(left_pt.x_ - d, left_pt.y_), img_right, "right_point"); HelperOpencv::DrawPoint(core::Point(left_pt.x_ - best_d, left_pt.y_), img_right, "best_right_point"); HelperOpencv::DrawDescriptor(desc_left.vec, stereo_wsz_, "desc_left"); HelperOpencv::DrawDescriptor(desc_right.vec, stereo_wsz_, "desc_right"); int key = cv::waitKey(0); if (key == 27) debug = false; } if (std::isnan(costs[d])) { //printf("nan skipped!\n", d, costs[d]); continue; } if 
(costs[d] > best_cost) { best_cost = costs[d]; best_d = d; } } //printf("Min cost = %d -- d = %d\n", static_cast<int>(min_cost), best_d); if (best_d >= 0 && best_cost >= ncc_thresh_) { //descriptors_right[best_desc_idx].vec.copyTo(save_descriptor); //save_descriptor = descriptors_right[best_desc_idx].vec.t(); //if (best_d > (max_disparity_ - 2)) // printf("Warning: big disp, best cost = %f -- d = %d. max_disp = %d\n", best_cost, best_d, max_disparity_); //std::cout << left_pt << "\n"; success = true; right_pt.y_ = left_pt.y_; // perform parabolic subpixel interpolation if we can if (estimate_subpixel_ && best_d >= 1 && best_d < (max_disp - 1) && !std::isnan(costs[best_d-1]) && !std::isnan(costs[best_d+1])) { double C_left = 2.0 - costs[best_d-1]; double C_center = 2.0 - costs[best_d]; double C_right = 2.0 - costs[best_d+1]; double d_s = (C_left - C_right) / (2.0*C_left - 4.0*C_center + 2.0*C_right); //printf("Parabolic fitting: %d --> %f\n", best_d, best_d+d_s); right_pt.x_ = left_pt.x_ - (static_cast<double>(best_d) + d_s); } else right_pt.x_ = left_pt.x_ - static_cast<double>(best_d); // perform equiangular subpixel interpolation //if (best_d >= 1 && best_d < (max_disp - 1)) { // double C_left = 2.0 - costs[best_d-1]; // double C_center = 2.0 - costs[best_d]; // double C_right = 2.0 - costs[best_d+1]; // double d_s; // if (C_right < C_left) // d_s = 0.5f * (C_right - C_left) / (C_center - C_left); // else // d_s = 0.5f * (C_right - C_left) / (C_center - C_right); // printf("Equangular fitting: %d --> %f\n", best_d, best_d+d_s); // //right_pt.x_ = left_pt.x_ - (static_cast<double>(best_d) + d_s); //} ////else //// right_pt.x_ = left_pt.x_ - static_cast<double>(best_d); } else { right_pt.x_ = std::numeric_limits<double>::max(); right_pt.y_ = std::numeric_limits<double>::max(); } assert(!std::isnan(right_pt.x_) && !std::isnan(right_pt.y_)); return success; } inline void StereoTracker::InterpolateBilinear(const cv::Mat& mat, const int row, const int col, const double 
x, const double y, double& ival)
{
  // Bilinear interpolation of the deformation-field value at offset (x, y)
  // inside the grid cell whose four corner samples sit at
  // (row,col)..(row+1,col+1) of mat; cell_width_/cell_height_ give the cell
  // extent in pixels.  Result is written to ival.
  double q1 = mat.at<double>(row, col);
  double q2 = mat.at<double>(row, col+1);
  double q3 = mat.at<double>(row+1, col);
  double q4 = mat.at<double>(row+1, col+1);
  double w = cell_width_;
  double h = cell_height_;
  // interpolate along x on the top and bottom cell edges ...
  double q12 = ((w-x) / w) * q1 + (x / w) * q2;
  double q34 = ((w-x) / w) * q3 + (x / w) * q4;
  // ... then along y between the two edge values
  ival = ((h-y) / h) * q12 + (y / h) * q34;
}

// Linear interpolation between val1 and val2 at offset x within a span of
// length `size`; result written to ival.
inline
void StereoTracker::InterpolateLinear(const double val1, const double val2,
                                      const double x, const double size, double& ival)
{
  ival = ((size - x) / size) * val1 + (x / size) * val2;
}

// Maps a point to the (row, col) index of the deformation-field cell that
// contains it (integer division truncates toward zero).
inline
void StereoTracker::GetPointCell(const core::Point& pt, int& row, int& col)
{
  col = pt.x_ / cell_width_;
  row = pt.y_ / cell_height_;
  //int bin_num = row * bin_cols_ + col;
}

// Retired census-transform matcher, kept for reference.
//inline
//bool StereoTracker::stereo_match_census(int max_disparity, int margin_sz, uint32_t census,
//                                        const cv::Mat& census_img,
//                                        const core::Point& left_pt,
//                                        core::Point& right_pt)
//{
//  bool success = false;
//  std::vector<uint8_t> costs;
//  costs.assign(max_disparity, std::numeric_limits<uint8_t>::max());
//  int cx = static_cast<int>(left_pt.x_) - margin_sz;
//  int cy = static_cast<int>(left_pt.y_) - margin_sz;
//  int max_disp = std::min(max_disparity, cx);
//  int best_d = -1;
//  uint8_t min_cost = std::numeric_limits<uint8_t>::max();
//  for (int d = 0; d <= max_disp; d++) {
//    costs[d] = recon::StereoCosts::hamming_dist(census, census_img.at<uint32_t>(cy,cx-d));
//    if (costs[d] < min_cost) {
//      min_cost = costs[d];
//      best_d = d;
//    }
//  }
//  //printf("Min cost = %d -- d = %d\n", static_cast<int>(min_cost), best_d);
//  if (best_d >= 0 && (int)(min_cost) == 0) {
//    printf("Min cost = %d -- d = %d\n", static_cast<int>(min_cost), best_d);
//    success = true;
//    right_pt.y_ = left_pt.y_;
//    // perform equiangular subpixel interpolation
//    if (best_d >= 1 && best_d < (max_disp - 1) && !std::isnan(costs[best_d-1]) && !std::isnan(costs[best_d+1])) {
//      double C_left = static_cast<double>(costs[best_d-1]);
//      double C_center = static_cast<double>(costs[best_d]);
//      double C_right = static_cast<double>(costs[best_d+1]);
//      double d_s = 0;
//      if (C_right < C_left)
//        d_s = 0.5f * (C_right - C_left) / (C_center - C_left);
//      else
//        d_s = 0.5f * (C_right - C_left) / (C_center - C_right);
//      //std::cout << d << " -- " << d+d_s << "\n";
//      right_pt.x_ = left_pt.x_ - (static_cast<double>(best_d) + d_s);
//    }
//    else
//      right_pt.x_ = left_pt.x_ - static_cast<double>(best_d);
//  }
//  return success;
//}

}  // namespace track

#endif
d2q9-bgk-OpenMP.c
/* ** Code to implement a d2q9-bgk lattice boltzmann scheme. ** 'd2' inidates a 2-dimensional grid, and ** 'q9' indicates 9 velocities per grid cell. ** 'bgk' refers to the Bhatnagar-Gross-Krook collision step. ** ** The 'speeds' in each cell are numbered as follows: ** ** 6 2 5 ** \|/ ** 3-0-1 ** /|\ ** 7 4 8 ** ** A 2D grid: ** ** cols ** --- --- --- ** | D | E | F | ** rows --- --- --- ** | A | B | C | ** --- --- --- ** ** 'unwrapped' in row major order to give a 1D array: ** ** --- --- --- --- --- --- ** | A | B | C | D | E | F | ** --- --- --- --- --- --- ** ** Grid indicies are: ** ** ny ** ^ cols(jj) ** | ----- ----- ----- ** | | ... | ... | etc | ** | ----- ----- ----- ** rows(ii) | | 1,0 | 1,1 | 1,2 | ** | ----- ----- ----- ** | | 0,0 | 0,1 | 0,2 | ** | ----- ----- ----- ** ----------------------> nx ** ** Note the names of the input parameter and obstacle files ** are passed on the command line, e.g.: ** ** d2q9-bgk.exe input.params obstacles.dat ** ** Be sure to adjust the grid dimensions in the parameter file ** if you choose a different obstacle file. */ #include<stdio.h> #include<stdlib.h> #include<math.h> #include<time.h> #include<sys/time.h> #include<sys/resource.h> #include<omp.h> #define NSPEEDS 9 #define FINALSTATEFILE "final_state.dat" #define AVVELSFILE "av_vels.dat" /* struct to hold the parameter values */ typedef struct { int nx; /* no. of cells in x-direction */ int ny; /* no. of cells in y-direction */ int maxIters; /* no. 
of iterations */ int reynolds_dim; /* dimension for Reynolds number */ double density; /* density per link */ double accel; /* density redistribution */ double omega; /* relaxation parameter */ } t_param; /* struct to hold the 'speed' values */ typedef struct { double speeds[NSPEEDS]; } t_speed; enum boolean { FALSE, TRUE }; /* ** function prototypes */ /* load params, allocate memory, load obstacles & initialise fluid particle densities */ int initialise(const char* paramfile, const char* obstaclefile, t_param* params, t_speed** cells_ptr, t_speed** tmp_cells_ptr, int** obstacles_ptr, double** av_vels_ptr); /* ** The main calculation methods. ** timestep calls, in order, the functions: ** accelerate_flow(), propagate(), rebound() & collision() */ int timestep(const t_param params, t_speed* cells, t_speed* tmp_cells, int* obstacles); int accelerate_flow(const t_param params, t_speed* cells, int* obstacles); int propagate(const t_param params, t_speed* cells, t_speed* tmp_cells); int rebound(const t_param params, t_speed* cells, t_speed* tmp_cells, int* obstacles); int collision(const t_param params, t_speed* cells, t_speed* tmp_cells, int* obstacles); int write_values(const t_param params, t_speed* cells, int* obstacles, double* av_vels); /* finalise, including freeing up allocated memory */ int finalise(const t_param* params, t_speed** cells_ptr, t_speed** tmp_cells_ptr, int** obstacles_ptr, double** av_vels_ptr); /* Sum all the densities in the grid. ** The total should remain constant from one timestep to the next. 
*/ double total_density(const t_param params, t_speed* cells); /* compute average velocity */ double av_velocity(const t_param params, t_speed* cells, int* obstacles); /* calculate Reynolds number */ double calc_reynolds(const t_param params, t_speed* cells, int* obstacles); /* utility functions */ void die(const char* message, const int line, const char *file); void usage(const char* exe); /* ** main program: ** initialise, timestep loop, finalise */ int main(int argc, char* argv[]) { char* paramfile = NULL; /* name of the input parameter file */ char* obstaclefile = NULL; /* name of a the input obstacle file */ t_param params; /* struct to hold parameter values */ t_speed* cells = NULL; /* grid containing fluid densities */ t_speed* tmp_cells = NULL; /* scratch space */ int* obstacles = NULL; /* grid indicating which cells are blocked */ double* av_vels = NULL; /* a record of the av. velocity computed for each timestep */ int ii; /* generic counter */ struct timeval timstr; /* structure to hold elapsed time */ struct rusage ru; /* structure to hold CPU time--system and user */ double tic,toc; /* floating point numbers to calculate elapsed wallclock time */ double usrtim; /* floating point number to record elapsed user CPU time */ double systim; /* floating point number to record elapsed system CPU time */ /* parse the command line */ if(argc != 3) { usage(argv[0]); } else{ paramfile = argv[1]; obstaclefile = argv[2]; } /* initialise our data structures and load values from file */ initialise(paramfile, obstaclefile, &params, &cells, &tmp_cells, &obstacles, &av_vels); /* iterate for maxIters timesteps */ gettimeofday(&timstr,NULL); tic=timstr.tv_sec+(timstr.tv_usec/1000000.0); for (ii=0;ii<params.maxIters;ii++) { timestep(params,cells,tmp_cells,obstacles); av_vels[ii] = av_velocity(params,cells,obstacles); #ifdef DEBUG printf("==timestep: %d==\n",ii); printf("av velocity: %.12E\n", av_vels[ii]); printf("tot density: %.12E\n",total_density(params,cells)); #endif } 
gettimeofday(&timstr,NULL);
  toc=timstr.tv_sec+(timstr.tv_usec/1000000.0);
  getrusage(RUSAGE_SELF, &ru);
  timstr=ru.ru_utime;
  usrtim=timstr.tv_sec+(timstr.tv_usec/1000000.0);
  timstr=ru.ru_stime;
  systim=timstr.tv_sec+(timstr.tv_usec/1000000.0);

  /* write final values and free memory */
  printf("==done==\n");
  printf("Reynolds number:\t\t%.12E\n",calc_reynolds(params,cells,obstacles));
  printf("Elapsed time:\t\t\t%.6lf (s)\n", toc-tic);
  printf("Elapsed user CPU time:\t\t%.6lf (s)\n", usrtim);
  printf("Elapsed system CPU time:\t%.6lf (s)\n", systim);
  write_values(params,cells,obstacles,av_vels);
  finalise(&params, &cells, &tmp_cells, &obstacles, &av_vels);

  return EXIT_SUCCESS;
}

/* One complete LBM timestep: accelerate flow on the 2nd row from the top,
** propagate (stream) all speeds into the scratch grid, apply the bounce-back
** rebound on obstacle cells, then the BGK collision/relaxation on fluid
** cells.  The four phases are fused inside a single "omp parallel" region;
** the implicit barrier at the end of each "omp for" keeps them ordered. */
int timestep(const t_param params, t_speed* cells, t_speed* tmp_cells, int* obstacles)
{
  int ii,jj;     /* generic counters */
  double w1,w2;  /* weighting factors */

  /* compute weighting factors */
  w1 = params.density * params.accel / 9.0;
  w2 = params.density * params.accel / 36.0;

  /* modify the 2nd row of the grid */
  ii=params.ny - 2;
#pragma omp parallel
  {
  /* --- accelerate_flow phase ---
  ** ii/w1/w2 are set before the region and only read here, hence
  ** firstprivate; jj is the (privatized) loop index. */
#pragma omp for firstprivate(ii,w1,w2) private(jj)
  for(jj=0;jj<params.nx;jj++) {
    /* if the cell is not occupied and
    ** we don't send a density negative */
    if( !obstacles[ii*params.nx + jj] &&
        (cells[ii*params.nx + jj].speeds[3] - w1) > 0.0 &&
        (cells[ii*params.nx + jj].speeds[6] - w2) > 0.0 &&
        (cells[ii*params.nx + jj].speeds[7] - w2) > 0.0 ) {
      /* increase 'east-side' densities */
      cells[ii*params.nx + jj].speeds[1] += w1;
      cells[ii*params.nx + jj].speeds[5] += w2;
      cells[ii*params.nx + jj].speeds[8] += w2;
      /* decrease 'west-side' densities */
      cells[ii*params.nx + jj].speeds[3] -= w1;
      cells[ii*params.nx + jj].speeds[6] -= w2;
      cells[ii*params.nx + jj].speeds[7] -= w2;
    }
  }

  int x_e,x_w,y_n,y_s;  /* indices of neighbouring cells */
  /* --- propagate phase: loop over _all_ cells --- */
#pragma omp for private(ii) private(jj) private(y_n) private(x_e) private(y_s) private(x_w)
  for(ii=0;ii<params.ny;ii++) {
    for(jj=0;jj<params.nx;jj++) {
      /* determine indices of axis-direction neighbours
      ** respecting periodic boundary conditions (wrap around) */
      y_n = (ii + 1) % params.ny;
      x_e = (jj + 1) % params.nx;
      y_s = (ii == 0) ? (ii + params.ny - 1) : (ii - 1);
      x_w = (jj == 0) ? (jj + params.nx - 1) : (jj - 1);
      /* propagate densities to neighbouring cells, following
      ** appropriate directions of travel and writing into
      ** scratch space grid */
      tmp_cells[ii *params.nx + jj].speeds[0]  = cells[ii*params.nx + jj].speeds[0]; /* central cell, */
                                                                                     /* no movement   */
      tmp_cells[ii *params.nx + x_e].speeds[1] = cells[ii*params.nx + jj].speeds[1]; /* east */
      tmp_cells[y_n*params.nx + jj].speeds[2]  = cells[ii*params.nx + jj].speeds[2]; /* north */
      tmp_cells[ii *params.nx + x_w].speeds[3] = cells[ii*params.nx + jj].speeds[3]; /* west */
      tmp_cells[y_s*params.nx + jj].speeds[4]  = cells[ii*params.nx + jj].speeds[4]; /* south */
      tmp_cells[y_n*params.nx + x_e].speeds[5] = cells[ii*params.nx + jj].speeds[5]; /* north-east */
      tmp_cells[y_n*params.nx + x_w].speeds[6] = cells[ii*params.nx + jj].speeds[6]; /* north-west */
      tmp_cells[y_s*params.nx + x_w].speeds[7] = cells[ii*params.nx + jj].speeds[7]; /* south-west */
      tmp_cells[y_s*params.nx + x_e].speeds[8] = cells[ii*params.nx + jj].speeds[8]; /* south-east */
    }
  }

  /* --- rebound phase: loop over the cells in the grid --- */
#pragma omp for private(ii) private(jj)
  for(ii=0;ii<params.ny;ii++) {
    for(jj=0;jj<params.nx;jj++) {
      /* if the cell contains an obstacle */
      if(obstacles[ii*params.nx + jj]) {
        /* called after propagate, so taking values from scratch space
        ** mirroring, and writing into main grid */
        cells[ii*params.nx + jj].speeds[1] = tmp_cells[ii*params.nx + jj].speeds[3];
        cells[ii*params.nx + jj].speeds[2] = tmp_cells[ii*params.nx + jj].speeds[4];
        cells[ii*params.nx + jj].speeds[3] = tmp_cells[ii*params.nx + jj].speeds[1];
        cells[ii*params.nx + jj].speeds[4] = tmp_cells[ii*params.nx + jj].speeds[2];
        cells[ii*params.nx + jj].speeds[5] = tmp_cells[ii*params.nx + jj].speeds[7];
        cells[ii*params.nx + jj].speeds[6] = tmp_cells[ii*params.nx + jj].speeds[8];
        cells[ii*params.nx + jj].speeds[7] = tmp_cells[ii*params.nx + jj].speeds[5];
        cells[ii*params.nx + jj].speeds[8] = tmp_cells[ii*params.nx + jj].speeds[6];
      }
    }
  }

  /* --- collision phase --- */
  int kk;
  const double c_sq = 1.0/3.0;  /* square of speed of sound */
  const double inv_c_sq = 3.0;  /* 1/c_sq, hoisted to avoid divisions */
  /* NB: these w0/w1/w2 are the lattice weights and deliberately shadow the
  ** acceleration weights declared at function scope. */
  const double w0 = 4.0/9.0;    /* weighting factor */
  const double w1 = 1.0/9.0;    /* weighting factor */
  const double w2 = 1.0/36.0;   /* weighting factor */
  double u_x,u_y;               /* av. velocities in x and y directions */
  double u[NSPEEDS];            /* directional velocities */
  double d_equ[NSPEEDS];        /* equilibrium densities */
  double u_sq;                  /* squared velocity */
  double local_density;         /* sum of densities in a particular cell */
  double inverselocal_density;  /* reciprocal of local_density */

  /* loop over the cells in the grid
  ** NB the collision step is called after
  ** the propagate step and so values of interest
  ** are in the scratch-space grid */
#pragma omp for private(ii) private(jj) private(local_density) private(kk) private(u_x) private(u_y) private(u) private(inverselocal_density) private(u_sq) private(d_equ)
  for(ii=0;ii<params.ny;ii++) {
    for(jj=0;jj<params.nx;jj++) {
      /* don't consider occupied cells */
      if(!obstacles[ii*params.nx + jj]) {
        /* compute local density total */
        local_density = 0.0;
        for(kk=0;kk<NSPEEDS;kk++) {
          local_density += tmp_cells[ii*params.nx + jj].speeds[kk];
        }
        inverselocal_density = 1/local_density;
        /* compute x velocity component */
        u_x = (tmp_cells[ii*params.nx + jj].speeds[1] +
               tmp_cells[ii*params.nx + jj].speeds[5] +
               tmp_cells[ii*params.nx + jj].speeds[8]
               - (tmp_cells[ii*params.nx + jj].speeds[3] +
                  tmp_cells[ii*params.nx + jj].speeds[6] +
                  tmp_cells[ii*params.nx + jj].speeds[7]))
              * inverselocal_density;
        /* compute y velocity component */
        u_y = (tmp_cells[ii*params.nx + jj].speeds[2] +
               tmp_cells[ii*params.nx + jj].speeds[5] +
               tmp_cells[ii*params.nx + jj].speeds[6]
               - (tmp_cells[ii*params.nx + jj].speeds[4] +
                  tmp_cells[ii*params.nx + jj].speeds[7] +
                  tmp_cells[ii*params.nx + jj].speeds[8]))
              * inverselocal_density;
        /* velocity squared */
        u_sq = u_x * u_x + u_y * u_y;
        /* directional velocity components */
        u[1] =   u_x;        /* east */
        u[2] =         u_y;  /* north */
        u[3] = - u_x;        /* west */
        u[4] =       - u_y;  /* south */
        u[5] =   u_x + u_y;  /* north-east */
        u[6] = - u_x + u_y;  /* north-west */
        u[7] = - u_x - u_y;  /* south-west */
        u[8] =   u_x - u_y;  /* south-east */
        /* equilibrium densities */
        /* zero velocity density: weight w0 */
        d_equ[0] = w0 * local_density * (1.0 - u_sq / (2.0 * c_sq));
        /* axis speeds: weight w1 */
        d_equ[1] = w1 * local_density * (1.0 + (u[1] * inv_c_sq) + (((u[1] * u[1]) * inv_c_sq * inv_c_sq) / (2.0)) - (u_sq * inv_c_sq) / (2.0));
        d_equ[2] = w1 * local_density * (1.0 + (u[2] * inv_c_sq) + (((u[2] * u[2]) * inv_c_sq * inv_c_sq) / (2.0)) - (u_sq * inv_c_sq) / (2.0));
        d_equ[3] = w1 * local_density * (1.0 + (u[3] * inv_c_sq) + (((u[3] * u[3]) * inv_c_sq * inv_c_sq) / (2.0)) - (u_sq * inv_c_sq) / (2.0));
        d_equ[4] = w1 * local_density * (1.0 + (u[4] * inv_c_sq) + (((u[4] * u[4]) * inv_c_sq * inv_c_sq) / (2.0)) - (u_sq * inv_c_sq) / (2.0));
        /* diagonal speeds: weight w2 */
        d_equ[5] = w2 * local_density * (1.0 + (u[5] * inv_c_sq) + (((u[5] * u[5]) * inv_c_sq * inv_c_sq) / (2.0)) - (u_sq * inv_c_sq) / (2.0));
        d_equ[6] = w2 * local_density * (1.0 + (u[6] * inv_c_sq) + (((u[6] * u[6]) * inv_c_sq * inv_c_sq) / (2.0)) - (u_sq * inv_c_sq) / (2.0));
        d_equ[7] = w2 * local_density * (1.0 + (u[7] * inv_c_sq) + (((u[7] * u[7]) * inv_c_sq * inv_c_sq) / (2.0)) - (u_sq * inv_c_sq) / (2.0));
        d_equ[8] = w2 * local_density * (1.0 + (u[8] * inv_c_sq) + (((u[8] * u[8]) * inv_c_sq * inv_c_sq) / (2.0)) - (u_sq * inv_c_sq) / (2.0));
        /* relaxation step */
        for(kk=0;kk<NSPEEDS;kk++) {
          cells[ii*params.nx + jj].speeds[kk] = (tmp_cells[ii*params.nx + jj].speeds[kk]
                                                 + params.omega *
                                                 (d_equ[kk] - tmp_cells[ii*params.nx + jj].speeds[kk]));
        }
      }
    }
  }
  }

  return EXIT_SUCCESS;
}

int initialise(const char* paramfile, const char* obstaclefile,
               t_param* params, t_speed** cells_ptr, t_speed**
tmp_cells_ptr, int** obstacles_ptr, double** av_vels_ptr)
{
  /* Reads the parameter file, allocates the two cell grids, the obstacle
  ** map and the per-iteration velocity record, seeds every cell with the
  ** equilibrium weights, and marks blocked cells from the obstacle file.
  ** Any failure is fatal via die(). Returns EXIT_SUCCESS. */
  char message[1024];  /* message buffer */
  FILE *fp;            /* file pointer */
  int ii,jj;           /* generic counters */
  int xx,yy;           /* generic array indices */
  int blocked;         /* indicates whether a cell is blocked by an obstacle */
  int retval;          /* to hold return value for checking */
  double w0,w1,w2;     /* weighting factors */

  /* open the parameter file */
  fp = fopen(paramfile,"r");
  if (fp == NULL) {
    sprintf(message,"could not open input parameter file: %s", paramfile);
    die(message,__LINE__,__FILE__);
  }

  /* read in the parameter values; each fscanf must convert exactly one field */
  retval = fscanf(fp,"%d\n",&(params->nx));
  if(retval != 1) die ("could not read param file: nx",__LINE__,__FILE__);
  retval = fscanf(fp,"%d\n",&(params->ny));
  if(retval != 1) die ("could not read param file: ny",__LINE__,__FILE__);
  retval = fscanf(fp,"%d\n",&(params->maxIters));
  if(retval != 1) die ("could not read param file: maxIters",__LINE__,__FILE__);
  retval = fscanf(fp,"%d\n",&(params->reynolds_dim));
  if(retval != 1) die ("could not read param file: reynolds_dim",__LINE__,__FILE__);
  retval = fscanf(fp,"%lf\n",&(params->density));
  if(retval != 1) die ("could not read param file: density",__LINE__,__FILE__);
  retval = fscanf(fp,"%lf\n",&(params->accel));
  if(retval != 1) die ("could not read param file: accel",__LINE__,__FILE__);
  retval = fscanf(fp,"%lf\n",&(params->omega));
  if(retval != 1) die ("could not read param file: omega",__LINE__,__FILE__);

  /* and close up the file */
  fclose(fp);

  /*
  ** Allocate memory.
  **
  ** Remember C is pass-by-value, so we need to
  ** pass pointers into the initialise function.
  **
  ** NB we are allocating a 1D array, so that the
  ** memory will be contiguous. We still want to
  ** index this memory as if it were a (row major
  ** ordered) 2D array, however. We will perform
  ** some arithmetic using the row and column
  ** coordinates, inside the square brackets, when
  ** we want to access elements of this array.
  **
  ** Note also that we are using a structure to
  ** hold an array of 'speeds'. We will allocate
  ** a 1D array of these structs.
  */

  /* main grid */
  *cells_ptr = (t_speed*)malloc(sizeof(t_speed)*(params->ny*params->nx));
  if (*cells_ptr == NULL) die("cannot allocate memory for cells",__LINE__,__FILE__);

  /* 'helper' grid, used as scratch space */
  *tmp_cells_ptr = (t_speed*)malloc(sizeof(t_speed)*(params->ny*params->nx));
  if (*tmp_cells_ptr == NULL) die("cannot allocate memory for tmp_cells",__LINE__,__FILE__);

  /* the map of obstacles */
  /* NOTE(review): element type is int but the size uses sizeof(int*);
  ** on LP64 this over-allocates 2x. sizeof(int) looks intended --
  ** harmless but wasteful; confirm and fix in a code change. */
  *obstacles_ptr = malloc(sizeof(int*)*(params->ny*params->nx));
  if (*obstacles_ptr == NULL) die("cannot allocate column memory for obstacles",__LINE__,__FILE__);

  /* initialise densities: D2Q9 equilibrium weights 4/9, 1/9, 1/36
  ** scaled by the requested fluid density */
  w0 = params->density * 4.0/9.0;
  w1 = params->density /9.0;
  w2 = params->density /36.0;

  for(ii=0;ii<params->ny;ii++) {
    for(jj=0;jj<params->nx;jj++) {
      /* centre */
      (*cells_ptr)[ii*params->nx + jj].speeds[0] = w0;
      /* axis directions */
      (*cells_ptr)[ii*params->nx + jj].speeds[1] = w1;
      (*cells_ptr)[ii*params->nx + jj].speeds[2] = w1;
      (*cells_ptr)[ii*params->nx + jj].speeds[3] = w1;
      (*cells_ptr)[ii*params->nx + jj].speeds[4] = w1;
      /* diagonals */
      (*cells_ptr)[ii*params->nx + jj].speeds[5] = w2;
      (*cells_ptr)[ii*params->nx + jj].speeds[6] = w2;
      (*cells_ptr)[ii*params->nx + jj].speeds[7] = w2;
      (*cells_ptr)[ii*params->nx + jj].speeds[8] = w2;
    }
  }

  /* first set all cells in obstacle array to zero */
  for(ii=0;ii<params->ny;ii++) {
    for(jj=0;jj<params->nx;jj++) {
      (*obstacles_ptr)[ii*params->nx + jj] = 0;
    }
  }

  /* open the obstacle data file */
  fp = fopen(obstaclefile,"r");
  if (fp == NULL) {
    sprintf(message,"could not open input obstacles file: %s", obstaclefile);
    die(message,__LINE__,__FILE__);
  }

  /* read-in the blocked cells list: one "x y blocked" triple per line */
  while( (retval = fscanf(fp,"%d %d %d\n", &xx, &yy, &blocked)) != EOF) {
    /* some checks */
    if ( retval != 3) die("expected 3 values per line in obstacle file",__LINE__,__FILE__);
    if ( xx<0 || xx>params->nx-1 ) die("obstacle x-coord out of range",__LINE__,__FILE__);
    if ( yy<0 || yy>params->ny-1 ) die("obstacle y-coord out of range",__LINE__,__FILE__);
    if ( blocked != 1 ) die("obstacle blocked value should be 1",__LINE__,__FILE__);
    /* assign to array */
    (*obstacles_ptr)[yy*params->nx + xx] = blocked;
  }

  /* and close the file */
  fclose(fp);

  /*
  ** allocate space to hold a record of the average velocities computed
  ** at each timestep
  */
  /* NOTE(review): this malloc is the only allocation here whose result
  ** is not checked before use by callers. */
  *av_vels_ptr = (double*)malloc(sizeof(double)*params->maxIters);

  return EXIT_SUCCESS;
}

/* Releases all grids allocated by initialise() and nulls the caller's
** pointers so a double finalise() is harmless (free(NULL) is a no-op). */
int finalise(const t_param* params, t_speed** cells_ptr, t_speed** tmp_cells_ptr, int** obstacles_ptr, double** av_vels_ptr)
{
  /*
  ** free up allocated memory
  */
  free(*cells_ptr);
  *cells_ptr = NULL;

  free(*tmp_cells_ptr);
  *tmp_cells_ptr = NULL;

  free(*obstacles_ptr);
  *obstacles_ptr = NULL;

  free(*av_vels_ptr);
  *av_vels_ptr = NULL;

  return EXIT_SUCCESS;
}

/* Mean velocity magnitude over all non-obstacle cells, computed in
** parallel with OpenMP reductions over tot_u and tot_cells. */
double av_velocity(const t_param params, t_speed* cells, int* obstacles)
{
  int ii,jj,kk;        /* generic counters */
  int tot_cells = 0;   /* no. of cells used in calculation */
  double local_density;  /* total density in cell */
  double u_x;            /* x-component of velocity for current cell */
  double u_y;            /* y-component of velocity for current cell */
  double tot_u;          /* accumulated magnitudes of velocity for each cell */
  double inverselocal_density;  /* reciprocal of local_density (avoids two divides) */

  /* initialise */
  tot_u = 0.0;

#pragma omp parallel
  {
    /* ii is the workshared loop variable and is implicitly private;
    ** everything else written per-cell is listed explicitly */
#pragma omp for reduction(+:tot_u) reduction(+:tot_cells) private(u_x) private(u_y) private(jj) private(local_density) private(inverselocal_density) private(kk)
    /* loop over all non-blocked cells */
    for(ii=0;ii<params.ny;ii++) {
      for(jj=0;jj<params.nx;jj++) {
        /* ignore occupied cells */
        if(!obstacles[ii*params.nx + jj]) {
          /* local density total */
          local_density = 0.0;
          for(kk=0;kk<NSPEEDS;kk++) {
            local_density += cells[ii*params.nx + jj].speeds[kk];
          }
          inverselocal_density = 1/local_density;
          /* x-component of velocity: (east-going) - (west-going) populations */
          u_x = (cells[ii*params.nx + jj].speeds[1] + cells[ii*params.nx + jj].speeds[5] + cells[ii*params.nx +
jj].speeds[8] -
                 (cells[ii*params.nx + jj].speeds[3] +
                  cells[ii*params.nx + jj].speeds[6] +
                  cells[ii*params.nx + jj].speeds[7])) * inverselocal_density;
          /* compute y velocity component: (north-going) - (south-going) */
          u_y = (cells[ii*params.nx + jj].speeds[2] +
                 cells[ii*params.nx + jj].speeds[5] +
                 cells[ii*params.nx + jj].speeds[6] -
                 (cells[ii*params.nx + jj].speeds[4] +
                  cells[ii*params.nx + jj].speeds[7] +
                  cells[ii*params.nx + jj].speeds[8])) * inverselocal_density;
          /* accumulate the norm of x- and y- velocity components */
          tot_u += sqrt((u_x * u_x) + (u_y * u_y));
          /* increase counter of inspected cells */
          tot_cells = tot_cells + 1;
        }
      }
    }
  }

  /* NOTE(review): if every cell is an obstacle, tot_cells is 0 and this
  ** divides by zero -- acceptable only if inputs guarantee open cells. */
  return tot_u / (double)tot_cells;
}

/* Reynolds number for the simulation: mean velocity times the
** characteristic dimension over the kinematic viscosity implied by omega. */
double calc_reynolds(const t_param params, t_speed* cells, int* obstacles)
{
  const double viscosity = 1.0 / 6.0 * (2.0 / params.omega - 1.0);

  return av_velocity(params,cells,obstacles) * params.reynolds_dim / viscosity;
}

/* Sum of every speed population over the whole grid; useful as a
** conservation check (total mass should stay constant). */
double total_density(const t_param params, t_speed* cells)
{
  int ii,jj,kk;        /* generic counters */
  double total = 0.0;  /* accumulator */

  for(ii=0;ii<params.ny;ii++) {
    for(jj=0;jj<params.nx;jj++) {
      for(kk=0;kk<NSPEEDS;kk++) {
        total += cells[ii*params.nx + jj].speeds[kk];
      }
    }
  }

  return total;
}

/* Writes the final macroscopic state (velocity, pressure, obstacle flag
** per cell) to FINALSTATEFILE and the per-iteration average velocities
** to AVVELSFILE. Obstacle cells get zero velocity and ambient pressure. */
int write_values(const t_param params, t_speed* cells, int* obstacles, double* av_vels)
{
  FILE* fp;                     /* file pointer */
  int ii,jj,kk;                 /* generic counters */
  const double c_sq = 1.0/3.0;  /* sq. of speed of sound */
  double local_density;         /* per grid cell sum of densities */
  double pressure;              /* fluid pressure in grid cell */
  double u_x;                   /* x-component of velocity in grid cell */
  double u_y;                   /* y-component of velocity in grid cell */
  double u;                     /* norm--root of summed squares--of u_x and u_y */

  fp = fopen(FINALSTATEFILE,"w");
  if (fp == NULL) {
    die("could not open file output file",__LINE__,__FILE__);
  }

  for(ii=0;ii<params.ny;ii++) {
    for(jj=0;jj<params.nx;jj++) {
      /* an occupied cell */
      if(obstacles[ii*params.nx + jj]) {
        u_x = u_y = u = 0.0;
        pressure = params.density * c_sq;
      }
      /* no obstacle */
      else {
        local_density = 0.0;
        for(kk=0;kk<NSPEEDS;kk++) {
          local_density += cells[ii*params.nx + jj].speeds[kk];
        }
        /* compute x velocity component */
        u_x = (cells[ii*params.nx + jj].speeds[1] +
               cells[ii*params.nx + jj].speeds[5] +
               cells[ii*params.nx + jj].speeds[8] -
               (cells[ii*params.nx + jj].speeds[3] +
                cells[ii*params.nx + jj].speeds[6] +
                cells[ii*params.nx + jj].speeds[7])) / local_density;
        /* compute y velocity component */
        u_y = (cells[ii*params.nx + jj].speeds[2] +
               cells[ii*params.nx + jj].speeds[5] +
               cells[ii*params.nx + jj].speeds[6] -
               (cells[ii*params.nx + jj].speeds[4] +
                cells[ii*params.nx + jj].speeds[7] +
                cells[ii*params.nx + jj].speeds[8])) / local_density;
        /* compute norm of velocity */
        u = sqrt((u_x * u_x) + (u_y * u_y));
        /* compute pressure from the ideal equation of state p = rho * c_s^2 */
        pressure = local_density * c_sq;
      }
      /* write to file */
      fprintf(fp,"%d %d %.12E %.12E %.12E %.12E %d\n",jj,ii,u_x,u_y,u,pressure,obstacles[ii*params.nx + jj]);
    }
  }

  fclose(fp);

  fp = fopen(AVVELSFILE,"w");
  if (fp == NULL) {
    die("could not open file output file",__LINE__,__FILE__);
  }
  for (ii=0;ii<params.maxIters;ii++) {
    fprintf(fp,"%d:\t%.12E\n", ii, av_vels[ii]);
  }

  fclose(fp);

  return EXIT_SUCCESS;
}

/* Prints an error with its source location to stderr and terminates. */
void die(const char* message, const int line, const char *file)
{
  fprintf(stderr, "Error at line %d of file %s:\n", line, file);
  fprintf(stderr, "%s\n",message);
  fflush(stderr);
  exit(EXIT_FAILURE);
}

/* Prints command-line usage and terminates. */
void usage(const char* exe)
{
  fprintf(stderr, "Usage: %s <paramfile> <obstaclefile>\n", exe);
  exit(EXIT_FAILURE);
}
OMP_test.c
// OpenMP program to print Hello World using C language // Run with gcc -Xpreprocessor -fopenmp -o OMP_test OMP_test.c -lomp #include "omp.h" #include <stdio.h> int main() { omp_set_dynamic(1); omp_set_num_threads(4); #pragma omp parallel num_threads(4) { int thread_id = omp_get_thread_num(); int nthreads = omp_get_num_threads(); int max_threads = omp_get_max_threads(); int num_proc = omp_get_num_procs(); printf("I'm thread %d out of %d threads. There are max %d threads on %d processors.\n", thread_id, nthreads, max_threads, num_proc); } return 0; }
3d7pt.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 * (Classic glibc recipe; note it MUTATES *y while normalising carries.) */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/* Benchmark driver: argv = Nx Ny Nz [Nt]. Allocates a double-buffered
 * Nz x Ny x Nx grid (each dimension padded by 2 halo cells), runs the
 * PLUTO/CLooG-tiled 7-point Jacobi update TESTS times and reports the
 * best wall-clock time.
 *
 * NOTE(review): Nx/Ny/Nz are left uninitialized when argc <= 3 and Nt
 * when argc <= 4, yet both are used unconditionally below -- UB for
 * short command lines. None of the mallocs is checked, tile_size is
 * never read after being filled in nor freed, and ts_return is set but
 * unused. All grounded in the code below; kept as-is in this
 * comment-only pass. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* two time planes (Jacobi double buffering), each Nz x Ny x Nx */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 24;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  // initialize variables
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2

    /* NOTE(review): a stray glibc <features.h> preamble (LGPL notice,
     * IEC 559 / wchar_t / C11 <threads.h> remarks) was pasted here by
     * the source-to-source tool; it documented glibc, not this program,
     * and has been condensed to this note. */

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;

/* Start of CLooG code */
/* Machine-generated time-tiled loop nest (tiles 8x8x24x1024); t5 is the
 * time step, t6/t7/t8 the z/y/x coordinates shifted by t5 (skewing). */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
  for (t1=-1;t1<=floord(Nt-2,4);t1++) {
    lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
    ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(t1-5,6)),ceild(8*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(4*t1+Ny+5,24)),floord(8*t2+Ny+4,24)),floord(8*t1-8*t2+Nz+Ny+3,24));t3++) {
        for (t4=max(max(max(0,ceild(t1-255,256)),ceild(8*t2-Nz-1020,1024)),ceild(24*t3-Ny-1020,1024));t4<=min(min(min(min(floord(Nt+Nx-4,1024),floord(4*t1+Nx+5,1024)),floord(8*t2+Nx+4,1024)),floord(24*t3+Nx+20,1024)),floord(8*t1-8*t2+Nz+Nx+3,1024));t4++) {
          for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),24*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),24*t3+22),1024*t4+1022),8*t1-8*t2+Nz+5);t5++) {
            for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
              for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
                lbv=max(1024*t4,t5+1);
                ubv=min(1024*t4+1023,t5+Nx-2);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  /* 7-point update: new = alpha*centre + beta*(6 face neighbours) */
                  A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (Causing performance degradation
  /* for(i=0; i<Nz; i++){
       for(j=0;j<Ny;j++){
         free(A[0][i][j]);
         free(A[1][i][j]);
       }
       free(A[0][i]);
       free(A[1][i]);
     }
     free(A[0]);
     free(A[1]); */
  return 0;
}
array_args.h
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#ifndef LIGHTGBM_UTILS_ARRAY_AGRS_H_
#define LIGHTGBM_UTILS_ARRAY_AGRS_H_

#include <LightGBM/utils/openmp_wrapper.h>

#include <algorithm>
#include <utility>
#include <vector>

namespace LightGBM {

/*!
 * \brief Contains some operation for a array, e.g. ArgMax, TopK.
 */
template<typename VAL_T>
class ArrayArgs {
 public:
  /*!
   * \brief Multi-threaded argmax: each OpenMP thread scans one contiguous
   *        chunk, then the per-chunk winners are reduced serially.
   *        Ties resolve to the lowest index within a chunk, then to the
   *        earliest chunk.
   */
  inline static size_t ArgMaxMT(const std::vector<VAL_T>& array) {
    int num_threads = 1;
#pragma omp parallel
#pragma omp master
    {
      num_threads = omp_get_num_threads();
    }
    /* ceil(size / num_threads), at least 1 element per chunk */
    int step = std::max(1, (static_cast<int>(array.size()) + num_threads - 1) / num_threads);
    std::vector<size_t> arg_maxs(num_threads, 0);
#pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < num_threads; ++i) {
      /* NOTE(review): step * i is int arithmetic; assumes array.size()
       * fits in int (the ArgMax() caller only reaches here above 1024
       * elements) -- confirm for very large arrays. */
      size_t start = step * i;
      if (start >= array.size()) { continue; }
      size_t end = std::min(array.size(), start + step);
      size_t arg_max = start;
      for (size_t j = start + 1; j < end; ++j) {
        if (array[j] > array[arg_max]) { arg_max = j; }
      }
      arg_maxs[i] = arg_max;
    }
    size_t ret = arg_maxs[0];
    for (int i = 1; i < num_threads; ++i) {
      if (array[arg_maxs[i]] > array[ret]) { ret = arg_maxs[i]; }
    }
    return ret;
  }

  /*!
   * \brief Index of the maximum element; 0 for an empty vector.
   *        Falls back to the parallel scan above past 1024 elements.
   */
  inline static size_t ArgMax(const std::vector<VAL_T>& array) {
    if (array.empty()) { return 0; }
    if (array.size() > 1024) {
      return ArgMaxMT(array);
    } else {
      size_t arg_max = 0;
      for (size_t i = 1; i < array.size(); ++i) {
        if (array[i] > array[arg_max]) { arg_max = i; }
      }
      return arg_max;
    }
  }

  /*! \brief Index of the minimum element; 0 for an empty vector. */
  inline static size_t ArgMin(const std::vector<VAL_T>& array) {
    if (array.empty()) { return 0; }
    size_t arg_min = 0;
    for (size_t i = 1; i < array.size(); ++i) {
      if (array[i] < array[arg_min]) { arg_min = i; }
    }
    return arg_min;
  }

  /*! \brief Raw-pointer overload of ArgMax; 0 when n == 0. */
  inline static size_t ArgMax(const VAL_T* array, size_t n) {
    if (n <= 0) { return 0; }
    size_t arg_max = 0;
    for (size_t i = 1; i < n; ++i) {
      if (array[i] > array[arg_max]) { arg_max = i; }
    }
    return arg_max;
  }

  /*! \brief Raw-pointer overload of ArgMin; 0 when n == 0. */
  inline static size_t ArgMin(const VAL_T* array, size_t n) {
    if (n <= 0) { return 0; }
    size_t arg_min = 0;
    for (size_t i = 1; i < n; ++i) {
      if (array[i] < array[arg_min]) { arg_min = i; }
    }
    return arg_min;
  }

  /*!
   * \brief Bentley-McIlroy three-way partition on [start, end) with the
   *        last element as pivot, ordering DESCENDING (elements > pivot
   *        end up left). On return (*l, *r) bracket the pivot-equal
   *        band: indices <= *l are > pivot, indices >= *r are < pivot.
   */
  inline static void Partition(std::vector<VAL_T>* arr, int start, int end, int* l, int* r) {
    int i = start - 1;
    int j = end - 1;
    int p = i;  /* right edge of pivot-equal elements parked on the left */
    int q = j;  /* left edge of pivot-equal elements parked on the right */
    if (start >= end) {
      return;
    }
    std::vector<VAL_T>& ref = *arr;
    VAL_T v = ref[end - 1];  /* pivot */
    for (;;) {
      while (ref[++i] > v);
      while (v > ref[--j]) {
        if (j == start) {
          break;
        }
      }
      if (i >= j) {
        break;
      }
      std::swap(ref[i], ref[j]);
      /* park pivot-equal elements at both ends; swapped into place below */
      if (ref[i] == v) {
        p++;
        std::swap(ref[p], ref[i]);
      }
      if (v == ref[j]) {
        q--;
        std::swap(ref[j], ref[q]);
      }
    }
    std::swap(ref[i], ref[end - 1]);
    j = i - 1;
    i = i + 1;
    /* move the parked equal elements into the middle band */
    for (int k = start; k <= p; k++, j--) {
      std::swap(ref[k], ref[j]);
    }
    for (int k = end - 2; k >= q; k--, i++) {
      std::swap(ref[i], ref[k]);
    }
    *l = j;
    *r = i;
  }

  // Note: k refer to index here. e.g. k=0 means get the max number.
  /*! \brief Quickselect on [start, end): after return, (*arr)[k] holds the
   *         (k+1)-th largest element and larger elements precede it. */
  inline static int ArgMaxAtK(std::vector<VAL_T>* arr, int start, int end, int k) {
    if (start >= end - 1) {
      return start;
    }
    int l = start;
    int r = end - 1;
    Partition(arr, start, end, &l, &r);
    // if find or all elements are the same.
    if ((k > l && k < r) || (l == start - 1 && r == end - 1)) {
      return k;
    } else if (k <= l) {
      return ArgMaxAtK(arr, start, l + 1, k);
    } else {
      return ArgMaxAtK(arr, r, end, k);
    }
  }

  // Note: k is 1-based here. e.g. k=3 means get the top-3 numbers.
  /*! \brief Copies array into *out, then truncates *out to its k largest
   *         elements (unsorted beyond position k-1). No-op target for k <= 0. */
  inline static void MaxK(const std::vector<VAL_T>& array, int k, std::vector<VAL_T>* out) {
    out->clear();
    if (k <= 0) {
      return;
    }
    for (auto val : array) {
      out->push_back(val);
    }
    if (static_cast<size_t>(k) >= array.size()) {
      return;
    }
    ArgMaxAtK(out, 0, static_cast<int>(out->size()), k - 1);
    out->erase(out->begin() + k, out->end());
  }

  /*! \brief Resizes *array to n elements, all set to t. */
  inline static void Assign(std::vector<VAL_T>* array, VAL_T t, size_t n) {
    array->resize(n);
    for (size_t i = 0; i < array->size(); ++i) {
      (*array)[i] = t;
    }
  }

  /*! \brief True iff every element equals VAL_T(0) (vacuously true when empty). */
  inline static bool CheckAllZero(const std::vector<VAL_T>& array) {
    for (size_t i = 0; i < array.size(); ++i) {
      if (array[i] != VAL_T(0)) {
        return false;
      }
    }
    return true;
  }

  /*! \brief True iff every element equals t (vacuously true when empty). */
  inline static bool CheckAll(const std::vector<VAL_T>& array, VAL_T t) {
    for (size_t i = 0; i < array.size(); ++i) {
      if (array[i] != t) {
        return false;
      }
    }
    return true;
  }
};

}  // namespace LightGBM

#endif  // LIGHTGBM_UTILS_ARRAY_AGRS_H_
parallel_team.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt, multicpu
// UNSUPPORTED: gcc
#include "callback.h"

// OMPT callback-ordering test: a one-team `target teams` construct hosts a
// two-thread parallel region. The CHECK lines below are FileCheck
// directives pinning the exact callback sequence (initial task -> teams ->
// runtime-forked parallel -> user parallel -> matching end events); they
// are load-bearing test content and must not be edited casually.
int main() {
#pragma omp target teams num_teams(1) thread_limit(2)
#pragma omp parallel num_threads(2)
  {
    printf("In teams\n");
  }
  return 0;
}

// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK-NOT: 0: parallel_data initially not null
// CHECK-NOT: 0: task_data initially not null
// CHECK-NOT: 0: thread_data initially not null
// CHECK: {{^}}[[MASTER:[0-9]+]]: ompt_event_initial_task_begin:
// CHECK-SAME: task_id=[[INIT_TASK:[0-9]+]], {{.*}}, index=1
// CHECK: {{^}}[[MASTER]]: ompt_event_teams_begin:
// CHECK-SAME: parent_task_id=[[INIT_TASK]]
// CHECK-SAME: {{.*}} requested_num_teams=1
// CHECK-SAME: {{.*}} invoker=[[TEAMS_FLAGS:[0-9]+]]
//
// team 0/thread 0
//
// initial task in the teams construct
// CHECK: {{^}}[[MASTER]]: ompt_event_initial_task_begin:
// CHECK-SAME: task_id=[[INIT_TASK_0:[0-9]+]], actual_parallelism=1, index=0
// parallel region forked by runtime
// CHECK: {{^}}[[MASTER]]: ompt_event_parallel_begin:
// CHECK-SAME: {{.*}} parent_task_id=[[INIT_TASK_0]]
// CHECK-SAME: {{.*}} parallel_id=[[PAR_0:[0-9]+]]
// CHECK: {{^}}[[MASTER]]: ompt_event_implicit_task_begin:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_0]], task_id=[[IMPL_TASK_0:[0-9]+]]
// user parallel region
// CHECK: {{^}}[[MASTER]]: ompt_event_parallel_begin:
// CHECK-SAME: {{.*}} parent_task_id=[[IMPL_TASK_0]]
// CHECK-SAME: {{.*}} parallel_id=[[PAR_00:[0-9]+]]
// CHECK-SAME: {{.*}} requested_team_size=2
// CHECK: {{^}}[[MASTER]]: ompt_event_implicit_task_begin:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_00]], task_id=[[IMPL_TASK_00:[0-9]+]]
// CHECK-SAME: {{.*}} team_size=2, thread_num=0
//
// barrier event is here
//
// CHECK: {{^}}[[MASTER]]: ompt_event_implicit_task_end:
// CHECK-SAME: {{.*}} parallel_id={{[0-9]+}}, task_id=[[IMPL_TASK_00]]
// CHECK: {{^}}[[MASTER]]: ompt_event_parallel_end:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_00]], task_id=[[IMPL_TASK_0]]
//
// CHECK: {{^}}[[MASTER]]: ompt_event_implicit_task_end:
// CHECK-SAME: {{.*}} parallel_id={{[0-9]+}}, task_id=[[IMPL_TASK_0]]
// CHECK: {{^}}[[MASTER]]: ompt_event_parallel_end:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_0]], task_id=[[INIT_TASK_0]]
// CHECK: {{^}}[[MASTER]]: ompt_event_initial_task_end:
// CHECK-SAME: task_id=[[INIT_TASK_0]], actual_parallelism=0, index=0
// CHECK: {{^}}[[MASTER]]: ompt_event_teams_end:
// CHECK-SAME: {{.*}} task_id=[[INIT_TASK]], invoker=[[TEAMS_FLAGS]]
// CHECK: {{^}}[[MASTER]]: ompt_event_initial_task_end:
// CHECK-SAME: task_id=[[INIT_TASK]], {{.*}}, index=1
//
// team 0/thread 1
//
// CHECK: {{^}}[[WORKER:[0-9]+]]: ompt_event_implicit_task_begin:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_00]], task_id=[[IMPL_TASK_01:[0-9]+]]
// CHECK-SAME: {{.*}} team_size=2, thread_num=1
//
// barrier event is here
//
// CHECK: {{^}}[[WORKER]]: ompt_event_implicit_task_end:
// CHECK-SAME: {{.*}} parallel_id={{[0-9]+}}, task_id=[[IMPL_TASK_01]]
omp_privateshared.c
#include <omp.h> #include <stdio.h> #include <stdlib.h> #include <malloc.h> /* compile with gcc -o PrivateShared -fopenmp PrivateShared.c */ int main(int argc, char** argv) { int i = 0; int tid; int size = 20; int * a = (int *)calloc(size, sizeof(int)); int * b = (int *)calloc(size, sizeof(int)); int * c; int * tids = (int *)calloc(size, sizeof(int)); printf("BEFORE\n"); for (i = 0; i < size; ++i) { a[i] = b[i] = i; printf("a[%d] = %d, b[%d] = %d\n", i, a[i], i, b[i]); } #pragma omp parallel shared(a, b, tids) private(c, i, tid) { tid = omp_get_thread_num(); c = (int *)malloc(sizeof(int)); #pragma omp for for (i = 0; i < size; ++i) { c[0] = tid * a[i]; a[i] = c[0]; b[i] += c[0]; tids[i] = tid; } free(c); } printf("AFTER\n"); for (i = 0; i < size; ++i) { printf("tid = %d, a[%d] = %d, b[%d] = %d\n", tids[i], i, a[i], i, b[i]); } free(a); free(b); free(tids); return 0; }
guide.c
#include <stdlib.h> #include <stdio.h> #include <math.h> float gaussian(float x, float y, float v, float k, float c) { return 1-v + v*k*exp(-(x*x+y*y)/(2*c*c)); } //unsigned char *forward_warping(const void *src, const void *idx, const void *idy, const void *z, int h, int w) void build_guide(const void *flow_x, const void *flow_y, const void *flow_valid, void *result, const int b, const int h, const int w, const float k, const float c) { float *forward_x = (float *)calloc(b * h * w, sizeof(float)); float *forward_y = (float *)calloc(b * h * w, sizeof(float)); for (int z = 0; z < b; z++) #pragma omp parallel for collapse(2) for (int y0 = 0; y0 < h; y0++) for (int x0 = 0; x0 < w; x0++) { if ( ((float*)flow_valid)[z*w*h + y0*w + x0] != 0) { forward_x[z*w*h + y0*w + x0] = ((float*)flow_x)[z*w*h + y0*w + x0] + x0; forward_y[z*w*h + y0*w + x0] = ((float*)flow_y)[z*w*h + y0*w + x0] + y0; } for (int y1 = 0; y1 < h; y1++) for (int x1 = 0; x1 < w; x1++) { // Accessing cell y=y0*w+x0 , x=y1*w+x1 // y term multiplied by row length (h*w) ((float*)result)[z*w*h*w*h + (y0*w+x0)*w*h + y1*w+x1] = gaussian(x1-forward_x[z*w*h + y0*w + x0], y1-forward_y[z*w*h + y0*w + x0], ((float*)flow_valid)[z*w*h + y0*w + x0], k, c); } } free(forward_x); free(forward_y); return; }
Stmt.h
//===- Stmt.h - Classes for representing statements -------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/DependenceFlags.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/BitmaskEnum.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <algorithm> #include <cassert> #include <cstddef> #include <iterator> #include <string> namespace llvm { class FoldingSetNodeID; } // namespace llvm namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class AddrLabelExpr; class LabelDecl; class ODRHash; class PrinterHelper; struct PrintingPolicy; class RecordDecl; class SourceManager; class StringLiteral; class Token; class VarDecl; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. 
/// class alignas(void *) Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: friend class ASTStmtReader; friend class ASTStmtWriter; void *operator new(size_t bytes) noexcept { llvm_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void *data) noexcept { llvm_unreachable("Stmts cannot be released with regular 'delete'."); } //===--- Statement bitfields classes ---===// class StmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class Stmt; /// The statement class. unsigned sClass : 8; }; enum { NumStmtBits = 8 }; class NullStmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class NullStmt; unsigned : NumStmtBits; /// True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode unsigned HasLeadingEmptyMacro : 1; /// The location of the semi-colon. SourceLocation SemiLoc; }; class CompoundStmtBitfields { friend class ASTStmtReader; friend class CompoundStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; /// The location of the opening "{". SourceLocation LBraceLoc; }; class PUBLIC_DOMAIN_TECHNOLOGYStmtBitfields { friend class ASTStmtReader; friend class PUBLIC_DOMAIN_TECHNOLOGYStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; /// The location of the opening "{". 
SourceLocation LBraceLoc; }; class LabelStmtBitfields { friend class LabelStmt; unsigned : NumStmtBits; SourceLocation IdentLoc; }; class AttributedStmtBitfields { friend class ASTStmtReader; friend class AttributedStmt; unsigned : NumStmtBits; /// Number of attributes. unsigned NumAttrs : 32 - NumStmtBits; /// The location of the attribute. SourceLocation AttrLoc; }; class IfStmtBitfields { friend class ASTStmtReader; friend class IfStmt; unsigned : NumStmtBits; /// True if this if statement is a constexpr if. unsigned IsConstexpr : 1; /// True if this if statement has storage for an else statement. unsigned HasElse : 1; /// True if this if statement has storage for a variable declaration. unsigned HasVar : 1; /// True if this if statement has storage for an init statement. unsigned HasInit : 1; /// The location of the "if". SourceLocation IfLoc; }; class SwitchStmtBitfields { friend class SwitchStmt; unsigned : NumStmtBits; /// True if the SwitchStmt has storage for an init statement. unsigned HasInit : 1; /// True if the SwitchStmt has storage for a condition variable. unsigned HasVar : 1; /// If the SwitchStmt is a switch on an enum value, records whether all /// the enum values were covered by CaseStmts. The coverage information /// value is meant to be a hint for possible clients. unsigned AllEnumCasesCovered : 1; /// The location of the "switch". SourceLocation SwitchLoc; }; class WhileStmtBitfields { friend class ASTStmtReader; friend class WhileStmt; unsigned : NumStmtBits; /// True if the WhileStmt has storage for a condition variable. unsigned HasVar : 1; /// The location of the "while". SourceLocation WhileLoc; }; class DoStmtBitfields { friend class DoStmt; unsigned : NumStmtBits; /// The location of the "do". SourceLocation DoLoc; }; class ForStmtBitfields { friend class ForStmt; unsigned : NumStmtBits; /// The location of the "for". 
SourceLocation ForLoc;
};

class GotoStmtBitfields {
  friend class GotoStmt;
  friend class IndirectGotoStmt;

  unsigned : NumStmtBits;

  /// The location of the "goto".
  SourceLocation GotoLoc;
};

class ContinueStmtBitfields {
  friend class ContinueStmt;

  unsigned : NumStmtBits;

  /// The location of the "continue".
  SourceLocation ContinueLoc;
};

class BreakStmtBitfields {
  friend class BreakStmt;

  unsigned : NumStmtBits;

  /// The location of the "break".
  SourceLocation BreakLoc;
};

class ReturnStmtBitfields {
  friend class ReturnStmt;

  unsigned : NumStmtBits;

  /// True if this ReturnStmt has storage for an NRVO candidate.
  unsigned HasNRVOCandidate : 1;

  /// The location of the "return".
  SourceLocation RetLoc;
};

class SwitchCaseBitfields {
  friend class SwitchCase;
  friend class CaseStmt;

  unsigned : NumStmtBits;

  /// Used by CaseStmt to store whether it is a case statement
  /// of the form case LHS ... RHS (a GNU extension).
  unsigned CaseStmtIsGNURange : 1;

  /// The location of the "case" or "default" keyword.
  SourceLocation KeywordLoc;
};

//===--- Expression bitfields classes ---===//

class ExprBitfields {
  friend class ASTStmtReader; // deserialization
  friend class AtomicExpr; // ctor
  friend class BlockDeclRefExpr; // ctor
  friend class CallExpr; // ctor
  friend class CXXConstructExpr; // ctor
  friend class CXXDependentScopeMemberExpr; // ctor
  friend class CXXNewExpr; // ctor
  friend class CXXUnresolvedConstructExpr; // ctor
  friend class DeclRefExpr; // computeDependence
  friend class DependentScopeDeclRefExpr; // ctor
  friend class DesignatedInitExpr; // ctor
  friend class Expr;
  friend class InitListExpr; // ctor
  friend class ObjCArrayLiteral; // ctor
  friend class ObjCDictionaryLiteral; // ctor
  friend class ObjCMessageExpr; // ctor
  friend class OffsetOfExpr; // ctor
  friend class OpaqueValueExpr; // ctor
  friend class OverloadExpr; // ctor
  friend class ParenListExpr; // ctor
  friend class PseudoObjectExpr; // ctor
  friend class ShuffleVectorExpr; // ctor

  unsigned : NumStmtBits;

  // Value category of the expression (2 bits) and object kind (3 bits);
  // together these account for the "+ 5" in NumExprBits below.
  unsigned ValueKind : 2;
  unsigned ObjectKind : 3;
  unsigned /*ExprDependence*/ Dependent : llvm::BitWidth<ExprDependence>;
};
enum { NumExprBits = NumStmtBits + 5 + llvm::BitWidth<ExprDependence> };

class ConstantExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class ConstantExpr;

  unsigned : NumExprBits;

  /// The kind of result that is tail-allocated.
  unsigned ResultKind : 2;

  /// The kind of Result as defined by APValue::Kind.
  unsigned APValueKind : 4;

  /// When ResultKind == RSK_Int64, true if the tail-allocated integer is
  /// unsigned.
  unsigned IsUnsigned : 1;

  /// When ResultKind == RSK_Int64. the BitWidth of the tail-allocated
  /// integer. 7 bits because it is the minimal number of bits to represent a
  /// value from 0 to 64 (the size of the tail-allocated integer).
  unsigned BitWidth : 7;

  /// When ResultKind == RSK_APValue, true if the ASTContext will cleanup the
  /// tail-allocated APValue.
  unsigned HasCleanup : 1;

  /// True if this ConstantExpr was created for immediate invocation.
  unsigned IsImmediateInvocation : 1;
};

class PredefinedExprBitfields {
  friend class ASTStmtReader;
  friend class PredefinedExpr;

  unsigned : NumExprBits;

  /// The kind of this PredefinedExpr. One of the enumeration values
  /// in PredefinedExpr::IdentKind.
  unsigned Kind : 4;

  /// True if this PredefinedExpr has a trailing "StringLiteral *"
  /// for the predefined identifier.
  unsigned HasFunctionName : 1;

  /// The location of this PredefinedExpr.
  SourceLocation Loc;
};

class DeclRefExprBitfields {
  friend class ASTStmtReader; // deserialization
  friend class DeclRefExpr;

  unsigned : NumExprBits;

  // Flags recording which optional trailing objects are present and how the
  // reference was formed; see DeclRefExpr for the full semantics.
  unsigned HasQualifier : 1;
  unsigned HasTemplateKWAndArgsInfo : 1;
  unsigned HasFoundDecl : 1;
  unsigned HadMultipleCandidates : 1;
  unsigned RefersToEnclosingVariableOrCapture : 1;
  unsigned NonOdrUseReason : 2;

  /// The location of the declaration name itself.
  SourceLocation Loc;
};

class FloatingLiteralBitfields {
  friend class FloatingLiteral;

  unsigned : NumExprBits;

  unsigned Semantics : 3; // Provides semantics for APFloat construction
  unsigned IsExact : 1;
};

class StringLiteralBitfields {
  friend class ASTStmtReader;
  friend class StringLiteral;

  unsigned : NumExprBits;

  /// The kind of this string literal.
  /// One of the enumeration values of StringLiteral::StringKind.
  unsigned Kind : 3;

  /// The width of a single character in bytes. Only values of 1, 2,
  /// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps
  /// the target + string kind to the appropriate CharByteWidth.
  unsigned CharByteWidth : 3;

  unsigned IsPascal : 1;

  /// The number of concatenated token this string is made of.
  /// This is the number of trailing SourceLocation.
  unsigned NumConcatenated;
};

class CharacterLiteralBitfields {
  friend class CharacterLiteral;

  unsigned : NumExprBits;

  unsigned Kind : 3;
};

class UnaryOperatorBitfields {
  friend class UnaryOperator;

  unsigned : NumExprBits;

  unsigned Opc : 5;
  unsigned CanOverflow : 1;

  /// This is only meaningful for operations on floating point
  /// types when additional values need to be in trailing storage.
  /// It is 0 otherwise.
  unsigned HasFPFeatures : 1;

  SourceLocation Loc;
};

class UnaryExprOrTypeTraitExprBitfields {
  friend class UnaryExprOrTypeTraitExpr;

  unsigned : NumExprBits;

  unsigned Kind : 3;
  unsigned IsType : 1; // true if operand is a type, false if an expression.
};

class ArrayOrMatrixSubscriptExprBitfields {
  friend class ArraySubscriptExpr;
  friend class MatrixSubscriptExpr;

  unsigned : NumExprBits;

  SourceLocation RBracketLoc;
};

class CallExprBitfields {
  friend class CallExpr;

  unsigned : NumExprBits;

  unsigned NumPreArgs : 1;

  /// True if the callee of the call expression was found using ADL.
  unsigned UsesADL : 1;

  /// Padding used to align OffsetToTrailingObjects to a byte multiple.
  unsigned : 24 - 2 - NumExprBits;

  /// The offset in bytes from the this pointer to the start of the
  /// trailing objects belonging to CallExpr. Intentionally byte sized
  /// for faster access.
  unsigned OffsetToTrailingObjects : 8;
};
enum { NumCallExprBits = 32 };

class MemberExprBitfields {
  friend class ASTStmtReader;
  friend class MemberExpr;

  unsigned : NumExprBits;

  /// IsArrow - True if this is "X->F", false if this is "X.F".
  unsigned IsArrow : 1;

  /// True if this member expression used a nested-name-specifier to
  /// refer to the member, e.g., "x->Base::f", or found its member via
  /// a using declaration. When true, a MemberExprNameQualifier
  /// structure is allocated immediately after the MemberExpr.
  unsigned HasQualifierOrFoundDecl : 1;

  /// True if this member expression specified a template keyword
  /// and/or a template argument list explicitly, e.g., x->f<int>,
  /// x->template f, x->template f<int>.
  /// When true, an ASTTemplateKWAndArgsInfo structure and its
  /// TemplateArguments (if any) are present.
  unsigned HasTemplateKWAndArgsInfo : 1;

  /// True if this member expression refers to a method that
  /// was resolved from an overloaded set having size greater than 1.
  unsigned HadMultipleCandidates : 1;

  /// Value of type NonOdrUseReason indicating why this MemberExpr does
  /// not constitute an odr-use of the named declaration. Meaningful only
  /// when naming a static member.
  unsigned NonOdrUseReason : 2;

  /// This is the location of the -> or . in the expression.
  SourceLocation OperatorLoc;
};

class CastExprBitfields {
  friend class CastExpr;
  friend class ImplicitCastExpr;

  unsigned : NumExprBits;

  unsigned Kind : 6;
  unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.

  /// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
  /// here. ([implimits] Direct and indirect base classes [16384]).
  unsigned BasePathSize;
};

class BinaryOperatorBitfields {
  friend class BinaryOperator;

  unsigned : NumExprBits;

  unsigned Opc : 6;

  /// This is only meaningful for operations on floating point
  /// types when additional values need to be in trailing storage.
  /// It is 0 otherwise.
  unsigned HasFPFeatures : 1;

  SourceLocation OpLoc;
};

class InitListExprBitfields {
  friend class InitListExpr;

  unsigned : NumExprBits;

  /// Whether this initializer list originally had a GNU array-range
  /// designator in it. This is a temporary marker used by CodeGen.
  unsigned HadArrayRangeDesignator : 1;
};

class ParenListExprBitfields {
  friend class ASTStmtReader;
  friend class ParenListExpr;

  unsigned : NumExprBits;

  /// The number of expressions in the paren list.
  unsigned NumExprs;
};

class GenericSelectionExprBitfields {
  friend class ASTStmtReader;
  friend class GenericSelectionExpr;

  unsigned : NumExprBits;

  /// The location of the "_Generic".
  SourceLocation GenericLoc;
};

class PseudoObjectExprBitfields {
  friend class ASTStmtReader; // deserialization
  friend class PseudoObjectExpr;

  unsigned : NumExprBits;

  // These don't need to be particularly wide, because they're
  // strictly limited by the forms of expressions we permit.
  unsigned NumSubExprs : 8;
  unsigned ResultIndex : 32 - 8 - NumExprBits;
};

class SourceLocExprBitfields {
  friend class ASTStmtReader;
  friend class SourceLocExpr;

  unsigned : NumExprBits;

  /// The kind of source location builtin represented by the SourceLocExpr.
  /// Ex. __builtin_LINE, __builtin_FUNCTION, etc.
  unsigned Kind : 2;
};

class StmtExprBitfields {
  friend class ASTStmtReader;
  friend class StmtExpr;

  unsigned : NumExprBits;

  /// The number of levels of template parameters enclosing this statement
  /// expression. Used to determine if a statement expression remains
  /// dependent after instantiation.
  unsigned TemplateDepth;
};

//===--- C++ Expression bitfields classes ---===//

class CXXOperatorCallExprBitfields {
  friend class ASTStmtReader;
  friend class CXXOperatorCallExpr;

  unsigned : NumCallExprBits;

  /// The kind of this overloaded operator. One of the enumerator
  /// value of OverloadedOperatorKind.
  unsigned OperatorKind : 6;

  // Only meaningful for floating point types.
  unsigned FPFeatures : 14;
};

class CXXRewrittenBinaryOperatorBitfields {
  friend class ASTStmtReader;
  friend class CXXRewrittenBinaryOperator;

  unsigned : NumCallExprBits;

  unsigned IsReversed : 1;
};

class CXXBoolLiteralExprBitfields {
  friend class CXXBoolLiteralExpr;

  unsigned : NumExprBits;

  /// The value of the boolean literal.
  unsigned Value : 1;

  /// The location of the boolean literal.
  SourceLocation Loc;
};

class CXXNullPtrLiteralExprBitfields {
  friend class CXXNullPtrLiteralExpr;

  unsigned : NumExprBits;

  /// The location of the null pointer literal.
  SourceLocation Loc;
};

class CXXThisExprBitfields {
  friend class CXXThisExpr;

  unsigned : NumExprBits;

  /// Whether this is an implicit "this".
  unsigned IsImplicit : 1;

  /// The location of the "this".
  SourceLocation Loc;
};

class CXXThrowExprBitfields {
  friend class ASTStmtReader;
  friend class CXXThrowExpr;

  unsigned : NumExprBits;

  /// Whether the thrown variable (if any) is in scope.
  unsigned IsThrownVariableInScope : 1;

  /// The location of the "throw".
  SourceLocation ThrowLoc;
};

class CXXDefaultArgExprBitfields {
  friend class ASTStmtReader;
  friend class CXXDefaultArgExpr;

  unsigned : NumExprBits;

  /// The location where the default argument expression was used.
  SourceLocation Loc;
};

class CXXDefaultInitExprBitfields {
  friend class ASTStmtReader;
  friend class CXXDefaultInitExpr;

  unsigned : NumExprBits;

  /// The location where the default initializer expression was used.
  SourceLocation Loc;
};

class CXXScalarValueInitExprBitfields {
  friend class ASTStmtReader;
  friend class CXXScalarValueInitExpr;

  unsigned : NumExprBits;

  SourceLocation RParenLoc;
};

class CXXNewExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class CXXNewExpr;

  unsigned : NumExprBits;

  /// Was the usage ::new, i.e. is the global new to be used?
  unsigned IsGlobalNew : 1;

  /// Do we allocate an array? If so, the first trailing "Stmt *" is the
  /// size expression.
  unsigned IsArray : 1;

  /// Should the alignment be passed to the allocation function?
  unsigned ShouldPassAlignment : 1;

  /// If this is an array allocation, does the usual deallocation
  /// function for the allocated type want to know the allocated size?
  unsigned UsualArrayDeleteWantsSize : 1;

  /// What kind of initializer do we have? Could be none, parens, or braces.
  /// In storage, we distinguish between "none, and no initializer expr", and
  /// "none, but an implicit initializer expr".
  unsigned StoredInitializationStyle : 2;

  /// True if the allocated type was expressed as a parenthesized type-id.
  unsigned IsParenTypeId : 1;

  /// The number of placement new arguments.
  unsigned NumPlacementArgs;
};

class CXXDeleteExprBitfields {
  friend class ASTStmtReader;
  friend class CXXDeleteExpr;

  unsigned : NumExprBits;

  /// Is this a forced global delete, i.e. "::delete"?
  unsigned GlobalDelete : 1;

  /// Is this the array form of delete, i.e. "delete[]"?
  unsigned ArrayForm : 1;

  /// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
  /// applied to pointer-to-array type (ArrayFormAsWritten will be false
  /// while ArrayForm will be true).
  unsigned ArrayFormAsWritten : 1;

  /// Does the usual deallocation function for the element type require
  /// a size_t argument?
  unsigned UsualArrayDeleteWantsSize : 1;

  /// Location of the expression.
  SourceLocation Loc;
};

class TypeTraitExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class TypeTraitExpr;

  unsigned : NumExprBits;

  /// The kind of type trait, which is a value of a TypeTrait enumerator.
  unsigned Kind : 8;

  /// If this expression is not value-dependent, this indicates whether
  /// the trait evaluated true or false.
  unsigned Value : 1;

  /// The number of arguments to this type trait. According to [implimits]
  /// 8 bits would be enough, but we require (and test for) at least 16 bits
  /// to mirror FunctionType.
  unsigned NumArgs;
};

class DependentScopeDeclRefExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class DependentScopeDeclRefExpr;

  unsigned : NumExprBits;

  /// Whether the name includes info for explicit template
  /// keyword and arguments.
  unsigned HasTemplateKWAndArgsInfo : 1;
};

class CXXConstructExprBitfields {
  friend class ASTStmtReader;
  friend class CXXConstructExpr;

  unsigned : NumExprBits;

  // Flags describing how the construction was written; see CXXConstructExpr.
  unsigned Elidable : 1;
  unsigned HadMultipleCandidates : 1;
  unsigned ListInitialization : 1;
  unsigned StdInitListInitialization : 1;
  unsigned ZeroInitialization : 1;
  unsigned ConstructionKind : 3;

  SourceLocation Loc;
};

class ExprWithCleanupsBitfields {
  friend class ASTStmtReader; // deserialization
  friend class ExprWithCleanups;

  unsigned : NumExprBits;

  // When false, it must not have side effects.
  unsigned CleanupsHaveSideEffects : 1;

  unsigned NumObjects : 32 - 1 - NumExprBits;
};

class CXXUnresolvedConstructExprBitfields {
  friend class ASTStmtReader;
  friend class CXXUnresolvedConstructExpr;

  unsigned : NumExprBits;

  /// The number of arguments used to construct the type.
  unsigned NumArgs;
};

class CXXDependentScopeMemberExprBitfields {
  friend class ASTStmtReader;
  friend class CXXDependentScopeMemberExpr;

  unsigned : NumExprBits;

  /// Whether this member expression used the '->' operator or
  /// the '.' operator.
  unsigned IsArrow : 1;

  /// Whether this member expression has info for explicit template
  /// keyword and arguments.
  unsigned HasTemplateKWAndArgsInfo : 1;

  /// See getFirstQualifierFoundInScope() and the comment listing
  /// the trailing objects.
  unsigned HasFirstQualifierFoundInScope : 1;

  /// The location of the '->' or '.' operator.
  SourceLocation OperatorLoc;
};

class OverloadExprBitfields {
  friend class ASTStmtReader;
  friend class OverloadExpr;

  unsigned : NumExprBits;

  /// Whether the name includes info for explicit template
  /// keyword and arguments.
  unsigned HasTemplateKWAndArgsInfo : 1;

  /// Padding used by the derived classes to store various bits. If you
  /// need to add some data here, shrink this padding and add your data
  /// above. NumOverloadExprBits also needs to be updated.
  unsigned : 32 - NumExprBits - 1;

  /// The number of results.
  unsigned NumResults;
};
enum { NumOverloadExprBits = NumExprBits + 1 };

class UnresolvedLookupExprBitfields {
  friend class ASTStmtReader;
  friend class UnresolvedLookupExpr;

  unsigned : NumOverloadExprBits;

  /// True if these lookup results should be extended by
  /// argument-dependent lookup if this is the operand of a function call.
  unsigned RequiresADL : 1;

  /// True if these lookup results are overloaded. This is pretty trivially
  /// rederivable if we urgently need to kill this field.
  unsigned Overloaded : 1;
};
// NOTE(review): the adjacent string literals below concatenate to
// "...must be <= than 4 bytes toavoid..." — missing space; harmless (only a
// diagnostic message) but worth fixing when touching this code.
static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
              "UnresolvedLookupExprBitfields must be <= than 4 bytes to"
              "avoid trashing OverloadExprBitfields::NumResults!");

class UnresolvedMemberExprBitfields {
  friend class ASTStmtReader;
  friend class UnresolvedMemberExpr;

  unsigned : NumOverloadExprBits;

  /// Whether this member expression used the '->' operator or
  /// the '.' operator.
  unsigned IsArrow : 1;

  /// Whether the lookup results contain an unresolved using declaration.
  unsigned HasUnresolvedUsing : 1;
};
static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
              "UnresolvedMemberExprBitfields must be <= than 4 bytes to"
              "avoid trashing OverloadExprBitfields::NumResults!");

class CXXNoexceptExprBitfields {
  friend class ASTStmtReader;
  friend class CXXNoexceptExpr;

  unsigned : NumExprBits;

  unsigned Value : 1;
};

class SubstNonTypeTemplateParmExprBitfields {
  friend class ASTStmtReader;
  friend class SubstNonTypeTemplateParmExpr;

  unsigned : NumExprBits;

  /// The location of the non-type template parameter reference.
  SourceLocation NameLoc;
};

class LambdaExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class LambdaExpr;

  unsigned : NumExprBits;

  /// The default capture kind, which is a value of type
  /// LambdaCaptureDefault.
  unsigned CaptureDefault : 2;

  /// Whether this lambda had an explicit parameter list vs. an
  /// implicit (and empty) parameter list.
  unsigned ExplicitParams : 1;

  /// Whether this lambda had the result type explicitly specified.
  unsigned ExplicitResultType : 1;

  /// The number of captures.
  unsigned NumCaptures : 16;
};

class RequiresExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class RequiresExpr;

  unsigned : NumExprBits;

  unsigned IsSatisfied : 1;

  SourceLocation RequiresKWLoc;
};

//===--- C++ Coroutines TS bitfields classes ---===//

class CoawaitExprBitfields {
  friend class CoawaitExpr;

  unsigned : NumExprBits;

  unsigned IsImplicit : 1;
};

//===--- Obj-C Expression bitfields classes ---===//

class ObjCIndirectCopyRestoreExprBitfields {
  friend class ObjCIndirectCopyRestoreExpr;

  unsigned : NumExprBits;

  unsigned ShouldCopy : 1;
};

//===--- Clang Extensions bitfields classes ---===//

class OpaqueValueExprBitfields {
  friend class ASTStmtReader;
  friend class OpaqueValueExpr;

  unsigned : NumExprBits;

  /// The OVE is a unique semantic reference to its source expression if this
  /// bit is set to true.
  unsigned IsUnique : 1;

  SourceLocation Loc;
};

union {
  // Same order as in StmtNodes.td.

  // Statements
  StmtBitfields StmtBits;
  NullStmtBitfields NullStmtBits;
  CompoundStmtBitfields CompoundStmtBits;
  // NOTE(review): PUBLIC_DOMAIN_TECHNOLOGYStmtBitfields is not defined in this
  // portion of the file — presumably declared alongside the other statement
  // bitfield classes earlier; verify it exists and mirrors CompoundStmtBitfields.
  PUBLIC_DOMAIN_TECHNOLOGYStmtBitfields PUBLIC_DOMAIN_TECHNOLOGYStmtBits;
  LabelStmtBitfields LabelStmtBits;
  AttributedStmtBitfields AttributedStmtBits;
  IfStmtBitfields IfStmtBits;
  SwitchStmtBitfields SwitchStmtBits;
  WhileStmtBitfields WhileStmtBits;
  DoStmtBitfields DoStmtBits;
  ForStmtBitfields ForStmtBits;
  GotoStmtBitfields GotoStmtBits;
  ContinueStmtBitfields ContinueStmtBits;
  BreakStmtBitfields BreakStmtBits;
  ReturnStmtBitfields ReturnStmtBits;
  SwitchCaseBitfields SwitchCaseBits;

  // Expressions
  ExprBitfields ExprBits;
  ConstantExprBitfields ConstantExprBits;
  PredefinedExprBitfields PredefinedExprBits;
  DeclRefExprBitfields DeclRefExprBits;
  FloatingLiteralBitfields FloatingLiteralBits;
  StringLiteralBitfields StringLiteralBits;
  CharacterLiteralBitfields CharacterLiteralBits;
  UnaryOperatorBitfields UnaryOperatorBits;
  UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
  ArrayOrMatrixSubscriptExprBitfields ArrayOrMatrixSubscriptExprBits;
  CallExprBitfields CallExprBits;
  MemberExprBitfields MemberExprBits;
  CastExprBitfields CastExprBits;
  BinaryOperatorBitfields BinaryOperatorBits;
  InitListExprBitfields InitListExprBits;
  ParenListExprBitfields ParenListExprBits;
  GenericSelectionExprBitfields GenericSelectionExprBits;
  PseudoObjectExprBitfields PseudoObjectExprBits;
  SourceLocExprBitfields SourceLocExprBits;

  // GNU Extensions.
  StmtExprBitfields StmtExprBits;

  // C++ Expressions
  CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
  CXXRewrittenBinaryOperatorBitfields CXXRewrittenBinaryOperatorBits;
  CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
  CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
  CXXThisExprBitfields CXXThisExprBits;
  CXXThrowExprBitfields CXXThrowExprBits;
  CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
  CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
  CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
  CXXNewExprBitfields CXXNewExprBits;
  CXXDeleteExprBitfields CXXDeleteExprBits;
  TypeTraitExprBitfields TypeTraitExprBits;
  DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
  CXXConstructExprBitfields CXXConstructExprBits;
  ExprWithCleanupsBitfields ExprWithCleanupsBits;
  CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
  CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
  OverloadExprBitfields OverloadExprBits;
  UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
  UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
  CXXNoexceptExprBitfields CXXNoexceptExprBits;
  SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;
  LambdaExprBitfields LambdaExprBits;
  RequiresExprBitfields RequiresExprBits;

  // C++ Coroutines TS expressions
  CoawaitExprBitfields CoawaitBits;

  // Obj-C Expressions
  ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;

  // Clang Extensions
  OpaqueValueExprBitfields OpaqueValueExprBits;
};

public:
// Only allow allocation of Stmts using the allocator in ASTContext
// or by doing a placement new.
void* operator new(size_t bytes, const ASTContext& C,
                   unsigned alignment = 8);

void* operator new(size_t bytes, const ASTContext* C,
                   unsigned alignment = 8) {
  return operator new(bytes, *C, alignment);
}

// Placement new: returns the caller-provided storage unchanged.
void *operator new(size_t bytes, void *mem) noexcept { return mem; }

// Matching operator deletes are intentionally no-ops: Stmts are allocated
// from the ASTContext's arena and are never individually freed.
void operator delete(void *, const ASTContext &, unsigned) noexcept {}
void operator delete(void *, const ASTContext *, unsigned) noexcept {}
void operator delete(void *, size_t) noexcept {}
void operator delete(void *, void *) noexcept {}

public:
/// A placeholder type used to construct an empty shell of a
/// type, that will be filled in later (e.g., by some
/// de-serialization).
struct EmptyShell {};

protected:
/// Iterator for iterating over Stmt * arrays that contain only T *.
///
/// This is needed because AST nodes use Stmt* arrays to store
/// references to children (to be compatible with StmtIterator).
template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *>
struct CastIterator
    : llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *,
                                  std::random_access_iterator_tag, TPtr> {
  using Base = typename CastIterator::iterator_adaptor_base;

  CastIterator() : Base(nullptr) {}
  CastIterator(StmtPtr *I) : Base(I) {}

  // Downcast the underlying Stmt* on dereference; null stays null.
  typename Base::value_type operator*() const {
    return cast_or_null<T>(*this->I);
  }
};

/// Const iterator for iterating over Stmt * arrays that contain only T *.
template <typename T>
using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>;

using ExprIterator = CastIterator<Expr>;
using ConstExprIterator = ConstCastIterator<Expr>;

private:
/// Whether statistic collection is enabled.
static bool StatisticsEnabled;

protected:
/// Construct an empty statement.
explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
Stmt() = delete;
Stmt(const Stmt &) = delete;
Stmt(Stmt &&) = delete;
Stmt &operator=(const Stmt &) = delete;
Stmt &operator=(Stmt &&) = delete;

Stmt(StmtClass SC) {
  // Guard the bitfield union's size: every *Bitfields class above must fit
  // in the same 8 bytes, and the whole object must be pointer-aligned.
  static_assert(sizeof(*this) <= 8,
                "changing bitfields changed sizeof(Stmt)");
  static_assert(sizeof(*this) % alignof(void *) == 0,
                "Insufficient alignment!");
  StmtBits.sClass = SC;
  if (StatisticsEnabled) Stmt::addStmtClass(SC);
}

StmtClass getStmtClass() const {
  return static_cast<StmtClass>(StmtBits.sClass);
}

const char *getStmtClassName() const;

/// SourceLocation tokens are not useful in isolation - they are low level
/// value objects created/interpreted by SourceManager. We assume AST
/// clients will have a pointer to the respective SourceManager.
SourceRange getSourceRange() const LLVM_READONLY;
SourceLocation getBeginLoc() const LLVM_READONLY;
SourceLocation getEndLoc() const LLVM_READONLY;

// global temp stats (until we have a per-module visitor)
static void addStmtClass(const StmtClass s);
static void EnableStatistics();
static void PrintStats();

/// Dumps the specified AST fragment and all subtrees to
/// \c llvm::errs().
void dump() const;
void dump(SourceManager &SM) const;
void dump(raw_ostream &OS, SourceManager &SM) const;
void dump(raw_ostream &OS) const;

/// \return Unique reproducible object identifier
int64_t getID(const ASTContext &Context) const;

/// dumpColor - same as dump(), but forces color highlighting.
void dumpColor() const;

/// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
/// back to its original source language syntax.
void dumpPretty(const ASTContext &Context) const;
void printPretty(raw_ostream &OS, PrinterHelper *Helper,
                 const PrintingPolicy &Policy, unsigned Indentation = 0,
                 StringRef NewlineSymbol = "\n",
                 const ASTContext *Context = nullptr) const;

/// Pretty-prints in JSON format.
void printJson(raw_ostream &Out, PrinterHelper *Helper,
               const PrintingPolicy &Policy, bool AddQuotes) const;

/// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
/// works on systems with GraphViz (Mac OS X) or dot+gv installed.
void viewAST() const;

/// Skip no-op (attributed, compound) container stmts and skip captured
/// stmt at the top, if \a IgnoreCaptured is true.
Stmt *IgnoreContainers(bool IgnoreCaptured = false);
const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
  return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
}

const Stmt *stripLabelLikeStatements() const;
Stmt *stripLabelLikeStatements() {
  // Delegate to the const overload, then cast the constness back off.
  return const_cast<Stmt*>(
      const_cast<const Stmt*>(this)->stripLabelLikeStatements());
}

/// Child Iterators: All subclasses must implement 'children'
/// to permit easy iteration over the substatements/subexpressions of an
/// AST node. This permits easy iteration over all nodes in the AST.
using child_iterator = StmtIterator;
using const_child_iterator = ConstStmtIterator;

using child_range = llvm::iterator_range<child_iterator>;
using const_child_range = llvm::iterator_range<const_child_iterator>;

child_range children();

const_child_range children() const {
  auto Children = const_cast<Stmt *>(this)->children();
  return const_child_range(Children.begin(), Children.end());
}

child_iterator child_begin() { return children().begin(); }
child_iterator child_end() { return children().end(); }

const_child_iterator child_begin() const { return children().begin(); }
const_child_iterator child_end() const { return children().end(); }

/// Produce a unique representation of the given statement.
///
/// \param ID once the profiling operation is complete, will contain
/// the unique representation of the given statement.
///
/// \param Context the AST context in which the statement resides
///
/// \param Canonical whether the profile should be based on the canonical
/// representation of this statement (e.g., where non-type template
/// parameters are identified by index/level rather than their
/// declaration pointers) or the exact representation of the statement as
/// written in the source.
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
             bool Canonical) const;

/// Calculate a unique representation for a statement that is
/// stable across compiler invocations.
///
/// \param ID profile information will be stored in ID.
///
/// \param Hash an ODRHash object which will be called where pointers would
/// have been used in the Profile function.
void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};

/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
  DeclGroupRef DG;
  SourceLocation StartLoc, EndLoc;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
      : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
  bool isSingleDecl() const { return DG.isSingleDecl(); }

  const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
  Decl *getSingleDecl() { return DG.getSingleDecl(); }

  const DeclGroupRef getDeclGroup() const { return DG; }
  DeclGroupRef getDeclGroup() { return DG; }
  void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }

  void setStartLoc(SourceLocation L) { StartLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DeclStmtClass;
  }

  // Iterators over subexpressions.
  child_range children() {
    return child_range(child_iterator(DG.begin(), DG.end()),
                       child_iterator(DG.end(), DG.end()));
  }

  const_child_range children() const {
    auto Children = const_cast<DeclStmt *>(this)->children();
    return const_child_range(Children);
  }

  using decl_iterator = DeclGroupRef::iterator;
  using const_decl_iterator = DeclGroupRef::const_iterator;
  using decl_range = llvm::iterator_range<decl_iterator>;
  using decl_const_range = llvm::iterator_range<const_decl_iterator>;

  decl_range decls() { return decl_range(decl_begin(), decl_end()); }

  decl_const_range decls() const {
    return decl_const_range(decl_begin(), decl_end());
  }

  decl_iterator decl_begin() { return DG.begin(); }
  decl_iterator decl_end() { return DG.end(); }
  const_decl_iterator decl_begin() const { return DG.begin(); }
  const_decl_iterator decl_end() const { return DG.end(); }

  using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;

  reverse_decl_iterator decl_rbegin() {
    return reverse_decl_iterator(decl_end());
  }

  reverse_decl_iterator decl_rend() {
    return reverse_decl_iterator(decl_begin());
  }
};

/// NullStmt - This is the null statement ";": C99 6.8.3p3.
/// class NullStmt : public Stmt { public: NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false) : Stmt(NullStmtClass) { NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro; setSemiLoc(L); } /// Build an empty null statement. explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {} SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; } void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; } bool hasLeadingEmptyMacro() const { return NullStmtBits.HasLeadingEmptyMacro; } SourceLocation getBeginLoc() const { return getSemiLoc(); } SourceLocation getEndLoc() const { return getSemiLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == NullStmtClass; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// CompoundStmt - This represents a group of statements like { stmt stmt }. class CompoundStmt final : public Stmt, private llvm::TrailingObjects<CompoundStmt, Stmt *> { friend class ASTStmtReader; friend TrailingObjects; /// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits. SourceLocation RBraceLoc; CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB); explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {} void setStmts(ArrayRef<Stmt *> Stmts); public: static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB); // Build an empty compound statement with a location. explicit CompoundStmt(SourceLocation Loc) : Stmt(CompoundStmtClass), RBraceLoc(Loc) { CompoundStmtBits.NumStmts = 0; CompoundStmtBits.LBraceLoc = Loc; } // Build an empty compound statement. 
  static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);

  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  // Mutable iteration over the trailing Stmt* array.
  using body_iterator = Stmt **;
  using body_range = llvm::iterator_range<body_iterator>;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
  body_iterator body_end() { return body_begin() + size(); }
  Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }

  Stmt *body_back() {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  // Const iteration over the same trailing storage.
  using const_body_iterator = Stmt *const *;
  using body_const_range = llvm::iterator_range<const_body_iterator>;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }

  const_body_iterator body_begin() const {
    return getTrailingObjects<Stmt *>();
  }

  const_body_iterator body_end() const { return body_begin() + size(); }

  const Stmt *body_front() const {
    return !body_empty() ? body_begin()[0] : nullptr;
  }

  const Stmt *body_back() const {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using reverse_body_iterator = std::reverse_iterator<body_iterator>;

  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }

  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  using const_reverse_body_iterator =
      std::reverse_iterator<const_body_iterator>;

  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }

  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  // Get the Stmt that StmtExpr would consider to be the result of this
  // compound statement. This is used by StmtExpr to properly emulate the GCC
  // compound expression extension, which ignores trailing NullStmts when
  // getting the result of the expression.
  // i.e. ({ 5;;; })
  //         ^^ ignored
  // If we don't find something that isn't a NullStmt, just return the last
  // Stmt.
  Stmt *getStmtExprResult() {
    for (auto *B : llvm::reverse(body())) {
      if (!isa<NullStmt>(B))
        return B;
    }
    return body_back();
  }

  const Stmt *getStmtExprResult() const {
    return const_cast<CompoundStmt *>(this)->getStmtExprResult();
  }

  SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getEndLoc() const { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators: the children are exactly the statements of the body.
  child_range children() { return child_range(body_begin(), body_end()); }

  const_child_range children() const {
    return const_child_range(body_begin(), body_end());
  }
};

/// PUBLIC_DOMAIN_TECHNOLOGYStmt - A braced group of statements, structurally
/// identical to CompoundStmt, that additionally carries a CheckStmt flag.
/// NOTE(review): the semantics of the CheckStmt flag are not visible in this
/// file; presumably it marks the block for a project-specific checking mode —
/// confirm against the callers of setCheckStmt().
class PUBLIC_DOMAIN_TECHNOLOGYStmt final
    : public Stmt,
      private llvm::TrailingObjects<PUBLIC_DOMAIN_TECHNOLOGYStmt, Stmt *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  // Whether this statement group is a "check" statement (see NOTE above).
  bool CheckStmt;

  /// The location of the closing "}". LBraceLoc is stored in
  /// PUBLIC_DOMAIN_TECHNOLOGYStmtBits.
  SourceLocation RBraceLoc;

  // Private: built through Create()/CreateEmpty() so the trailing Stmt*
  // array can be allocated together with the object.
  PUBLIC_DOMAIN_TECHNOLOGYStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB,
                               SourceLocation RB);

  explicit PUBLIC_DOMAIN_TECHNOLOGYStmt(EmptyShell Empty)
      : Stmt(PUBLIC_DOMAIN_TECHNOLOGYStmtClass, Empty) {
    CheckStmt = false;
  }

  void setStmts(ArrayRef<Stmt *> Stmts);

public:
  static PUBLIC_DOMAIN_TECHNOLOGYStmt *Create(const ASTContext &C,
                                              ArrayRef<Stmt *> Stmts,
                                              SourceLocation LB,
                                              SourceLocation RB);

  // Build an empty compound statement with a location.
  explicit PUBLIC_DOMAIN_TECHNOLOGYStmt(SourceLocation Loc)
      : Stmt(PUBLIC_DOMAIN_TECHNOLOGYStmtClass), RBraceLoc(Loc) {
    PUBLIC_DOMAIN_TECHNOLOGYStmtBits.NumStmts = 0;
    PUBLIC_DOMAIN_TECHNOLOGYStmtBits.LBraceLoc = Loc;
    CheckStmt = false;
  }

  bool isCheckStmt() const { return CheckStmt; }

  // NOTE(review): despite its name, this does not toggle the flag — it
  // unconditionally clears it. Confirm callers expect "clear" semantics; if
  // so, clearCheckStmt() would be a less surprising name.
  void negateCheckStmt() { CheckStmt = false; }

  void setCheckStmt() { CheckStmt = true; }

  // Build an empty compound statement.
  static PUBLIC_DOMAIN_TECHNOLOGYStmt *CreateEmpty(const ASTContext &C,
                                                   unsigned NumStmts);

  bool body_empty() const {
    return PUBLIC_DOMAIN_TECHNOLOGYStmtBits.NumStmts == 0;
  }
  unsigned size() const { return PUBLIC_DOMAIN_TECHNOLOGYStmtBits.NumStmts; }

  // Mutable iteration over the trailing Stmt* array.
  using body_iterator = Stmt **;
  using body_range = llvm::iterator_range<body_iterator>;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
  body_iterator body_end() { return body_begin() + size(); }
  Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }

  Stmt *body_back() {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  // Const iteration over the same trailing storage.
  using const_body_iterator = Stmt *const *;
  using body_const_range = llvm::iterator_range<const_body_iterator>;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }

  const_body_iterator body_begin() const {
    return getTrailingObjects<Stmt *>();
  }

  const_body_iterator body_end() const { return body_begin() + size(); }

  const Stmt *body_front() const {
    return !body_empty() ? body_begin()[0] : nullptr;
  }

  const Stmt *body_back() const {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using reverse_body_iterator = std::reverse_iterator<body_iterator>;

  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }

  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  using const_reverse_body_iterator =
      std::reverse_iterator<const_body_iterator>;

  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }

  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  // Get the Stmt that StmtExpr would consider to be the result of this
  // compound statement. This is used by StmtExpr to properly emulate the GCC
  // compound expression extension, which ignores trailing NullStmts when
  // getting the result of the expression.
  // i.e. ({ 5;;; })
  //         ^^ ignored
  // If we don't find something that isn't a NullStmt, just return the last
  // Stmt.
  Stmt *getStmtExprResult() {
    for (auto *B : llvm::reverse(body())) {
      if (!isa<NullStmt>(B))
        return B;
    }
    return body_back();
  }

  const Stmt *getStmtExprResult() const {
    return const_cast<PUBLIC_DOMAIN_TECHNOLOGYStmt *>(this)->getStmtExprResult();
  }

  SourceLocation getBeginLoc() const {
    return PUBLIC_DOMAIN_TECHNOLOGYStmtBits.LBraceLoc;
  }
  SourceLocation getEndLoc() const { return RBraceLoc; }

  SourceLocation getLBracLoc() const {
    return PUBLIC_DOMAIN_TECHNOLOGYStmtBits.LBraceLoc;
  }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == PUBLIC_DOMAIN_TECHNOLOGYStmtClass;
  }

  // Iterators: the children are exactly the statements of the body.
  child_range children() { return child_range(body_begin(), body_end()); }

  const_child_range children() const {
    return const_child_range(body_begin(), body_end());
  }
};

// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
  /// The location of the ":".
  SourceLocation ColonLoc;

  // The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
  // SourceLocation KeywordLoc;

  /// A pointer to the following CaseStmt or DefaultStmt class,
  /// used by SwitchStmt. Forms an intrusive singly-linked list of the cases
  /// of one switch.
  SwitchCase *NextSwitchCase = nullptr;

  SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
      : Stmt(SC), ColonLoc(ColonLoc) {
    setKeywordLoc(KWLoc);
  }

  SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
  SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
  void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

  SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
  void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  // Dispatches to CaseStmt/DefaultStmt; defined inline after both classes.
  inline Stmt *getSubStmt();
  const Stmt *getSubStmt() const {
    return const_cast<SwitchCase *>(this)->getSubStmt();
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  inline SourceLocation getEndLoc() const LLVM_READONLY;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass ||
           T->getStmtClass() == DefaultStmtClass;
  }
};

/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
    : public SwitchCase,
      private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // CaseStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing objects
  // at the end but this would impact children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the LHS of the case statement. Always present.
  //
  // * A "Stmt *" for the RHS of the case statement. This is a GNU extension
  //   which allow ranges in cases statement of the form LHS ... RHS.
  //   Present if and only if caseStmtIsGNURange() is true.
  //
  // * A "Stmt *" for the substatement of the case statement. Always present.
  //
  // * A SourceLocation for the location of the ... if this is a case statement
  //   with a range. Present if and only if caseStmtIsGNURange() is true.
  enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + caseStmtIsGNURange();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return caseStmtIsGNURange();
  }

  // Offsets into the trailing Stmt* array; the RHS slot only exists for
  // GNU range cases, which shifts the substatement slot by one.
  unsigned lhsOffset() const { return LhsOffset; }
  unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
  unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }

  /// Build a case statement assuming that the storage for the
  /// trailing objects has been properly allocated.
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
      : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    // Handle GNU case statements of the form LHS ... RHS.
    bool IsGNURange = rhs != nullptr;
    SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
    setLHS(lhs);
    setSubStmt(nullptr);
    if (IsGNURange) {
      setRHS(rhs);
      setEllipsisLoc(ellipsisLoc);
    }
  }

  /// Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
      : SwitchCase(CaseStmtClass, Empty) {
    SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
  }

public:
  /// Build a case statement.
  static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
                          SourceLocation caseLoc, SourceLocation ellipsisLoc,
                          SourceLocation colonLoc);

  /// Build an empty case statement.
  static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);

  /// True if this case statement is of the form case LHS ... RHS, which
  /// is a GNU extension. In this case the RHS can be obtained with getRHS()
  /// and the location of the ellipsis can be obtained with getEllipsisLoc().
  bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }

  SourceLocation getCaseLoc() const { return getKeywordLoc(); }
  void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }

  /// Get the location of the ... in a case statement of the form LHS ... RHS.
  SourceLocation getEllipsisLoc() const {
    return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
                                : SourceLocation();
  }

  /// Set the location of the ... in a case statement of the form LHS ... RHS.
  /// Assert that this case statement is of this form.
  void setEllipsisLoc(SourceLocation L) {
    assert(
        caseStmtIsGNURange() &&
        "setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
    *getTrailingObjects<SourceLocation>() = L;
  }

  Expr *getLHS() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  const Expr *getLHS() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  void setLHS(Expr *Val) {
    getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  // RHS accessors return nullptr for non-range cases (no RHS slot exists).
  Expr *getRHS() {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  const Expr *getRHS() const {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  void setRHS(Expr *Val) {
    assert(caseStmtIsGNURange() &&
           "setRHS but this is not a case stmt of the form LHS ... RHS!");
    getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
  const Stmt *getSubStmt() const {
    return getTrailingObjects<Stmt *>()[subStmtOffset()];
  }

  void setSubStmt(Stmt *S) {
    getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // Handle deeply nested case statements with iteration instead of recursion.
    const CaseStmt *CS = this;
    while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
      CS = CS2;

    return CS->getSubStmt()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};

/// Represents the default case of a switch: "default: <substatement>".
class DefaultStmt : public SwitchCase {
  Stmt *SubStmt;

public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
      : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// Build an empty default statement.
  explicit DefaultStmt(EmptyShell Empty)
      : SwitchCase(DefaultStmtClass, Empty) {}

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
  void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }
};

// Defined here, after both subclasses, so the dyn_casts resolve.
SourceLocation SwitchCase::getEndLoc() const {
  if (const auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getEndLoc();
  else if (const auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getEndLoc();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}

Stmt *SwitchCase::getSubStmt() {
  if (auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getSubStmt();
  else if (auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getSubStmt();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}

/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
class ValueStmt : public Stmt {
protected:
  using Stmt::Stmt;

public:
  const Expr *getExprStmt() const;
  Expr *getExprStmt() {
    const ValueStmt *ConstThis = this;
    return const_cast<Expr*>(ConstThis->getExprStmt());
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() >= firstValueStmtConstant &&
           T->getStmtClass() <= lastValueStmtConstant;
  }
};

/// LabelStmt - Represents a label, which has a substatement. For example:
///    foo: return;
class LabelStmt : public ValueStmt {
  LabelDecl *TheDecl;
  Stmt *SubStmt;

public:
  /// Build a label statement.
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
    setIdentLoc(IL);
  }

  /// Build an empty label statement.
  explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}

  SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
  void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }

  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }

  // The label's name; defined out of line (goes through the LabelDecl).
  const char *getName() const;
  Stmt *getSubStmt() { return SubStmt; }

  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getBeginLoc() const { return getIdentLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};

/// Represents an attribute applied to a statement. For example:
///   [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
    : public ValueStmt,
      private llvm::TrailingObjects<AttributedStmt, const Attr *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  Stmt *SubStmt;

  // Private: built through Create()/CreateEmpty() so the trailing Attr*
  // array can be allocated together with the object.
  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
                 Stmt *SubStmt)
      : ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
    AttributedStmtBits.NumAttrs = Attrs.size();
    AttributedStmtBits.AttrLoc = Loc;
    std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
  }

  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
      : ValueStmt(AttributedStmtClass, Empty) {
    AttributedStmtBits.NumAttrs = NumAttrs;
    AttributedStmtBits.AttrLoc = SourceLocation{};
    std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
  }

  const Attr *const *getAttrArrayPtr() const {
    return getTrailingObjects<const Attr *>();
  }
  const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr *> Attrs, Stmt *SubStmt);

  // Build an empty attributed statement.
  static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
  ArrayRef<const Attr *> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
  }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getBeginLoc() const { return getAttrLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};

/// IfStmt - This represents an if/then/else.
class IfStmt final
    : public Stmt,
      private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // IfStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing
  // objects at the end but this would change the order of the children.
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact a "Expr *".
  //
  // * A "Stmt *" for the then statement.
  //   Always present.
  //
  // * A "Stmt *" for the else statement.
  //   Present if and only if hasElseStorage().
  //
  // * A "SourceLocation" for the location of the "else".
  //   Present if and only if hasElseStorage().
  enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
           hasInitStorage();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return hasElseStorage();
  }

  // Offsets into the trailing Stmt* array; optional slots shift later ones.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
  unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }

  /// Build an if/then/else statement.
  IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
         VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL, Stmt *Else);

  /// Build an empty if/then/else statement.
  explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);

public:
  /// Create an IfStmt.
  static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
                        bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
                        Stmt *Then, SourceLocation EL = SourceLocation(),
                        Stmt *Else = nullptr);

  /// Create an empty IfStmt optionally with storage for an else statement,
  /// condition variable and init expression.
  static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
                             bool HasInit);

  /// True if this IfStmt has the storage for an init statement.
  bool hasInitStorage() const { return IfStmtBits.HasInit; }

  /// True if this IfStmt has storage for a variable declaration.
  bool hasVarStorage() const { return IfStmtBits.HasVar; }

  /// True if this IfStmt has storage for an else statement.
  bool hasElseStorage() const { return IfStmtBits.HasElse; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
  const Stmt *getThen() const {
    return getTrailingObjects<Stmt *>()[thenOffset()];
  }

  void setThen(Stmt *Then) {
    getTrailingObjects<Stmt *>()[thenOffset()] = Then;
  }

  // Returns nullptr when no else storage was allocated.
  Stmt *getElse() {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  const Stmt *getElse() const {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  void setElse(Stmt *Else) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    getTrailingObjects<Stmt *>()[elseOffset()] = Else;
  }

  /// Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<IfStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable for this if statement.
  /// The if statement must have storage for the condition variable.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This if statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
  void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }

  SourceLocation getElseLoc() const {
    return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
                            : SourceLocation();
  }

  void setElseLoc(SourceLocation ElseLoc) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    *getTrailingObjects<SourceLocation>() = ElseLoc;
  }

  bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
  void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }

  /// If this is an 'if constexpr', determine which substatement will be taken.
  /// Otherwise, or if the condition is value-dependent, returns None.
  Optional<const Stmt*> getNondiscardedCase(const ASTContext &Ctx) const;

  bool isObjCAvailabilityCheck() const;

  SourceLocation getBeginLoc() const { return getIfLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    if (getElse())
      return getElse()->getEndLoc();
    return getThen()->getEndLoc();
  }

  // Iterators over subexpressions.  The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};

/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
  friend TrailingObjects;

  /// Points to a linked list of case and default statements.
  SwitchCase *FirstCase;

  // SwitchStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more convenient to
  // put the optional trailing objects at the end but this would change
  // the order in children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
enum { InitOffset = 0, BodyOffsetFromCond = 1 }; enum { NumMandatoryStmtPtr = 2 }; unsigned numTrailingObjects(OverloadToken<Stmt *>) const { return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage(); } unsigned initOffset() const { return InitOffset; } unsigned varOffset() const { return InitOffset + hasInitStorage(); } unsigned condOffset() const { return InitOffset + hasInitStorage() + hasVarStorage(); } unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; } /// Build a switch statement. SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond); /// Build a empty switch statement. explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar); public: /// Create a switch statement. static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond); /// Create an empty switch statement optionally with storage for /// an init expression and a condition variable. static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit, bool HasVar); /// True if this SwitchStmt has storage for an init statement. bool hasInitStorage() const { return SwitchStmtBits.HasInit; } /// True if this SwitchStmt has storage for a condition variable. bool hasVarStorage() const { return SwitchStmtBits.HasVar; } Expr *getCond() { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } const Expr *getCond() const { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } void setCond(Expr *Cond) { getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond); } Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; } const Stmt *getBody() const { return getTrailingObjects<Stmt *>()[bodyOffset()]; } void setBody(Stmt *Body) { getTrailingObjects<Stmt *>()[bodyOffset()] = Body; } Stmt *getInit() { return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()] : nullptr; } const Stmt *getInit() const { return hasInitStorage() ? 
getTrailingObjects<Stmt *>()[initOffset()] : nullptr; } void setInit(Stmt *Init) { assert(hasInitStorage() && "This switch statement has no storage for an init statement!"); getTrailingObjects<Stmt *>()[initOffset()] = Init; } /// Retrieve the variable declared in this "switch" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// switch (int x = foo()) { /// case 0: break; /// // ... /// } /// \endcode VarDecl *getConditionVariable(); const VarDecl *getConditionVariable() const { return const_cast<SwitchStmt *>(this)->getConditionVariable(); } /// Set the condition variable in this switch statement. /// The switch statement must have storage for it. void setConditionVariable(const ASTContext &Ctx, VarDecl *VD); /// If this SwitchStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. DeclStmt *getConditionVariableDeclStmt() { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } const DeclStmt *getConditionVariableDeclStmt() const { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } SwitchCase *getSwitchCaseList() { return FirstCase; } const SwitchCase *getSwitchCaseList() const { return FirstCase; } void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; } SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; } void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; } void setBody(Stmt *S, SourceLocation SL) { setBody(S); setSwitchLoc(SL); } void addSwitchCase(SwitchCase *SC) { assert(!SC->getNextSwitchCase() && "case/default already added to a switch"); SC->setNextSwitchCase(FirstCase); FirstCase = SC; } /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a /// switch over an enum value then all cases have been explicitly covered. 
void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }

/// Returns true if the SwitchStmt is a switch of an enum value and all cases
/// have been explicitly covered.
bool isAllEnumCasesCovered() const {
  return SwitchStmtBits.AllEnumCasesCovered;
}

SourceLocation getBeginLoc() const { return getSwitchLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
  return getBody() ? getBody()->getEndLoc()
                   : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
}

// Iterators
child_range children() {
  return child_range(getTrailingObjects<Stmt *>(),
                     getTrailingObjects<Stmt *>() +
                         numTrailingObjects(OverloadToken<Stmt *>()));
}

const_child_range children() const {
  return const_child_range(getTrailingObjects<Stmt *>(),
                           getTrailingObjects<Stmt *>() +
                               numTrailingObjects(OverloadToken<Stmt *>()));
}

static bool classof(const Stmt *T) {
  return T->getStmtClass() == SwitchStmtClass;
}
};

/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
  friend TrailingObjects;

  // WhileStmt is followed by several trailing objects,
  // some of which are optional. Note that it would be more
  // convenient to put the optional trailing object at the end
  // but this would affect children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  //
  enum { VarOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned varOffset() const { return VarOffset; }
  unsigned condOffset() const { return VarOffset + hasVarStorage(); }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasVarStorage();
  }

  /// Build a while statement.
WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body, SourceLocation WL); /// Build an empty while statement. explicit WhileStmt(EmptyShell Empty, bool HasVar); public: /// Create a while statement. static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body, SourceLocation WL); /// Create an empty while statement optionally with storage for /// a condition variable. static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar); /// True if this WhileStmt has storage for a condition variable. bool hasVarStorage() const { return WhileStmtBits.HasVar; } Expr *getCond() { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } const Expr *getCond() const { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } void setCond(Expr *Cond) { getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond); } Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; } const Stmt *getBody() const { return getTrailingObjects<Stmt *>()[bodyOffset()]; } void setBody(Stmt *Body) { getTrailingObjects<Stmt *>()[bodyOffset()] = Body; } /// Retrieve the variable declared in this "while" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// while (int x = random()) { /// // ... /// } /// \endcode VarDecl *getConditionVariable(); const VarDecl *getConditionVariable() const { return const_cast<WhileStmt *>(this)->getConditionVariable(); } /// Set the condition variable of this while statement. /// The while statement must have storage for it. void setConditionVariable(const ASTContext &Ctx, VarDecl *V); /// If this WhileStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. DeclStmt *getConditionVariableDeclStmt() { return hasVarStorage() ? 
static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } const DeclStmt *getConditionVariableDeclStmt() const { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; } void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; } SourceLocation getBeginLoc() const { return getWhileLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return getBody()->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == WhileStmtClass; } // Iterators child_range children() { return child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } const_child_range children() const { return const_child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } }; /// DoStmt - This represents a 'do/while' stmt. class DoStmt : public Stmt { enum { BODY, COND, END_EXPR }; Stmt *SubExprs[END_EXPR]; SourceLocation WhileLoc; SourceLocation RParenLoc; // Location of final ')' in do stmt condition. public: DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL, SourceLocation RP) : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) { setCond(Cond); setBody(Body); setDoLoc(DL); } /// Build an empty do-while statement. 
explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {} Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); } const Expr *getCond() const { return reinterpret_cast<Expr *>(SubExprs[COND]); } void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getBody() const { return SubExprs[BODY]; } void setBody(Stmt *Body) { SubExprs[BODY] = Body; } SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; } void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; } SourceLocation getWhileLoc() const { return WhileLoc; } void setWhileLoc(SourceLocation L) { WhileLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getBeginLoc() const { return getDoLoc(); } SourceLocation getEndLoc() const { return getRParenLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == DoStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR); } const_child_range children() const { return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR); } }; /// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of /// the init/cond/inc parts of the ForStmt will be null if they were not /// specified in the source. class ForStmt : public Stmt { enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt. SourceLocation LParenLoc, RParenLoc; public: ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar, Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP, SourceLocation RP); /// Build an empty for statement. explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {} Stmt *getInit() { return SubExprs[INIT]; } /// Retrieve the variable declared in this "for" statement, if any. /// /// In the following example, "y" is the condition variable. 
/// \code /// for (int x = random(); int y = mangle(x); ++x) { /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this ForStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getInit() const { return SubExprs[INIT]; } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); } const Stmt *getBody() const { return SubExprs[BODY]; } void setInit(Stmt *S) { SubExprs[INIT] = S; } void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getForLoc() const { return ForStmtBits.ForLoc; } void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; } SourceLocation getLParenLoc() const { return LParenLoc; } void setLParenLoc(SourceLocation L) { LParenLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getBeginLoc() const { return getForLoc(); } SourceLocation getEndLoc() const { return getBody()->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ForStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } const_child_range children() const { return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR); } }; /// GotoStmt - This represents a direct goto. 
class GotoStmt : public Stmt { LabelDecl *Label; SourceLocation LabelLoc; public: GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL) : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) { setGotoLoc(GL); } /// Build an empty goto statement. explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {} LabelDecl *getLabel() const { return Label; } void setLabel(LabelDecl *D) { Label = D; } SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; } void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; } SourceLocation getLabelLoc() const { return LabelLoc; } void setLabelLoc(SourceLocation L) { LabelLoc = L; } SourceLocation getBeginLoc() const { return getGotoLoc(); } SourceLocation getEndLoc() const { return getLabelLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == GotoStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// IndirectGotoStmt - This represents an indirect goto. class IndirectGotoStmt : public Stmt { SourceLocation StarLoc; Stmt *Target; public: IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target) : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) { setTarget(target); setGotoLoc(gotoLoc); } /// Build an empty indirect goto statement. 
explicit IndirectGotoStmt(EmptyShell Empty) : Stmt(IndirectGotoStmtClass, Empty) {} void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; } SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; } void setStarLoc(SourceLocation L) { StarLoc = L; } SourceLocation getStarLoc() const { return StarLoc; } Expr *getTarget() { return reinterpret_cast<Expr *>(Target); } const Expr *getTarget() const { return reinterpret_cast<const Expr *>(Target); } void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); } /// getConstantTarget - Returns the fixed target of this indirect /// goto, if one exists. LabelDecl *getConstantTarget(); const LabelDecl *getConstantTarget() const { return const_cast<IndirectGotoStmt *>(this)->getConstantTarget(); } SourceLocation getBeginLoc() const { return getGotoLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == IndirectGotoStmtClass; } // Iterators child_range children() { return child_range(&Target, &Target + 1); } const_child_range children() const { return const_child_range(&Target, &Target + 1); } }; /// ContinueStmt - This represents a continue. class ContinueStmt : public Stmt { public: ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) { setContinueLoc(CL); } /// Build an empty continue statement. 
explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {} SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; } void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; } SourceLocation getBeginLoc() const { return getContinueLoc(); } SourceLocation getEndLoc() const { return getContinueLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ContinueStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// BreakStmt - This represents a break. class BreakStmt : public Stmt { public: BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) { setBreakLoc(BL); } /// Build an empty break statement. explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {} SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; } void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; } SourceLocation getBeginLoc() const { return getBreakLoc(); } SourceLocation getEndLoc() const { return getBreakLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == BreakStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// ReturnStmt - This represents a return, optionally of an expression: /// return; /// return 4; /// /// Note that GCC allows return with no argument in a function declared to /// return a value, and it allows returning a value in functions declared to /// return void. We explicitly model this in the AST, which means you can't /// depend on the return type of the function and the presence of an argument. 
class ReturnStmt final : public Stmt, private llvm::TrailingObjects<ReturnStmt, const VarDecl *> { friend TrailingObjects; /// The return expression. Stmt *RetExpr; // ReturnStmt is followed optionally by a trailing "const VarDecl *" // for the NRVO candidate. Present if and only if hasNRVOCandidate(). /// True if this ReturnStmt has storage for an NRVO candidate. bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; } unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const { return hasNRVOCandidate(); } /// Build a return statement. ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate); /// Build an empty return statement. explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate); public: /// Create a return statement. static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate); /// Create an empty return statement, optionally with /// storage for an NRVO candidate. static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate); Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); } const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); } void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); } /// Retrieve the variable that might be used for the named return /// value optimization. /// /// The optimization itself can only be performed if the variable is /// also marked as an NRVO object. const VarDecl *getNRVOCandidate() const { return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>() : nullptr; } /// Set the variable that might be used for the named return value /// optimization. The return statement must have storage for it, /// which is the case if and only if hasNRVOCandidate() is true. 
void setNRVOCandidate(const VarDecl *Var) { assert(hasNRVOCandidate() && "This return statement has no storage for an NRVO candidate!"); *getTrailingObjects<const VarDecl *>() = Var; } SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; } void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; } SourceLocation getBeginLoc() const { return getReturnLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return RetExpr ? RetExpr->getEndLoc() : getReturnLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ReturnStmtClass; } // Iterators child_range children() { if (RetExpr) return child_range(&RetExpr, &RetExpr + 1); return child_range(child_iterator(), child_iterator()); } const_child_range children() const { if (RetExpr) return const_child_range(&RetExpr, &RetExpr + 1); return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt. class AsmStmt : public Stmt { protected: friend class ASTStmtReader; SourceLocation AsmLoc; /// True if the assembly statement does not have any input or output /// operands. bool IsSimple; /// If true, treat this inline assembly as having side effects. /// This assembly statement should not be optimized, deleted or moved. bool IsVolatile; unsigned NumOutputs; unsigned NumInputs; unsigned NumClobbers; Stmt **Exprs = nullptr; AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, unsigned numclobbers) : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile), NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) {} public: /// Build an empty inline-assembly statement. 
explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {} SourceLocation getAsmLoc() const { return AsmLoc; } void setAsmLoc(SourceLocation L) { AsmLoc = L; } bool isSimple() const { return IsSimple; } void setSimple(bool V) { IsSimple = V; } bool isVolatile() const { return IsVolatile; } void setVolatile(bool V) { IsVolatile = V; } SourceLocation getBeginLoc() const LLVM_READONLY { return {}; } SourceLocation getEndLoc() const LLVM_READONLY { return {}; } //===--- Asm String Analysis ---===// /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// unsigned getNumOutputs() const { return NumOutputs; } /// getOutputConstraint - Return the constraint string for the specified /// output operand. All output constraints are known to be non-empty (either /// '=' or '+'). StringRef getOutputConstraint(unsigned i) const; /// isOutputPlusConstraint - Return true if the specified output constraint /// is a "+" constraint (which is both an input and an output) or false if it /// is an "=" constraint (just an output). bool isOutputPlusConstraint(unsigned i) const { return getOutputConstraint(i)[0] == '+'; } const Expr *getOutputExpr(unsigned i) const; /// getNumPlusOperands - Return the number of output operands that have a "+" /// constraint. unsigned getNumPlusOperands() const; //===--- Input operands ---===// unsigned getNumInputs() const { return NumInputs; } /// getInputConstraint - Return the specified input constraint. Unlike output /// constraints, these can be empty. StringRef getInputConstraint(unsigned i) const; const Expr *getInputExpr(unsigned i) const; //===--- Other ---===// unsigned getNumClobbers() const { return NumClobbers; } StringRef getClobber(unsigned i) const; static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass || T->getStmtClass() == MSAsmStmtClass; } // Input expr iterators. 
using inputs_iterator = ExprIterator; using const_inputs_iterator = ConstExprIterator; using inputs_range = llvm::iterator_range<inputs_iterator>; using inputs_const_range = llvm::iterator_range<const_inputs_iterator>; inputs_iterator begin_inputs() { return &Exprs[0] + NumOutputs; } inputs_iterator end_inputs() { return &Exprs[0] + NumOutputs + NumInputs; } inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); } const_inputs_iterator begin_inputs() const { return &Exprs[0] + NumOutputs; } const_inputs_iterator end_inputs() const { return &Exprs[0] + NumOutputs + NumInputs; } inputs_const_range inputs() const { return inputs_const_range(begin_inputs(), end_inputs()); } // Output expr iterators. using outputs_iterator = ExprIterator; using const_outputs_iterator = ConstExprIterator; using outputs_range = llvm::iterator_range<outputs_iterator>; using outputs_const_range = llvm::iterator_range<const_outputs_iterator>; outputs_iterator begin_outputs() { return &Exprs[0]; } outputs_iterator end_outputs() { return &Exprs[0] + NumOutputs; } outputs_range outputs() { return outputs_range(begin_outputs(), end_outputs()); } const_outputs_iterator begin_outputs() const { return &Exprs[0]; } const_outputs_iterator end_outputs() const { return &Exprs[0] + NumOutputs; } outputs_const_range outputs() const { return outputs_const_range(begin_outputs(), end_outputs()); } child_range children() { return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } const_child_range children() const { return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } }; /// This represents a GCC inline-assembly statement extension. class GCCAsmStmt : public AsmStmt { friend class ASTStmtReader; SourceLocation RParenLoc; StringLiteral *AsmStr; // FIXME: If we wanted to, we could allocate all of these in one big array. 
StringLiteral **Constraints = nullptr; StringLiteral **Clobbers = nullptr; IdentifierInfo **Names = nullptr; unsigned NumLabels = 0; public: GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, IdentifierInfo **names, StringLiteral **constraints, Expr **exprs, StringLiteral *asmstr, unsigned numclobbers, StringLiteral **clobbers, unsigned numlabels, SourceLocation rparenloc); /// Build an empty inline-assembly statement. explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {} SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } //===--- Asm String Analysis ---===// const StringLiteral *getAsmString() const { return AsmStr; } StringLiteral *getAsmString() { return AsmStr; } void setAsmString(StringLiteral *E) { AsmStr = E; } /// AsmStringPiece - this is part of a decomposed asm string specification /// (for use with the AnalyzeAsmString function below). An asm string is /// considered to be a concatenation of these parts. class AsmStringPiece { public: enum Kind { String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%". Operand // Operand reference, with optional modifier %c4. }; private: Kind MyKind; std::string Str; unsigned OperandNo; // Source range for operand references. 
CharSourceRange Range;

public:
  AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
  AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
                 SourceLocation End)
      : MyKind(Operand), Str(S), OperandNo(OpNo),
        Range(CharSourceRange::getCharRange(Begin, End)) {}

  bool isString() const { return MyKind == String; }
  bool isOperand() const { return MyKind == Operand; }

  const std::string &getString() const { return Str; }

  unsigned getOperandNo() const {
    assert(isOperand());
    return OperandNo;
  }

  CharSourceRange getRange() const {
    assert(isOperand() && "Range is currently used only for Operands.");
    return Range;
  }

  /// getModifier - Get the modifier for this operand, if present. This
  /// returns '\0' if there was no modifier.
  char getModifier() const;
};

/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
/// it into pieces. If the asm string is erroneous, emit errors and return
/// true, otherwise return false. This handles canonicalization and
/// translation of strings from GCC syntax to LLVM IR syntax, and handles
/// flattening of named references like %[foo] to Operand AsmStringPiece's.
unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
                          const ASTContext &C, unsigned &DiagOffs) const;

/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; } StringRef getOutputName(unsigned i) const { if (IdentifierInfo *II = getOutputIdentifier(i)) return II->getName(); return {}; } StringRef getOutputConstraint(unsigned i) const; const StringLiteral *getOutputConstraintLiteral(unsigned i) const { return Constraints[i]; } StringLiteral *getOutputConstraintLiteral(unsigned i) { return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// IdentifierInfo *getInputIdentifier(unsigned i) const { return Names[i + NumOutputs]; } StringRef getInputName(unsigned i) const { if (IdentifierInfo *II = getInputIdentifier(i)) return II->getName(); return {}; } StringRef getInputConstraint(unsigned i) const; const StringLiteral *getInputConstraintLiteral(unsigned i) const { return Constraints[i + NumOutputs]; } StringLiteral *getInputConstraintLiteral(unsigned i) { return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getInputExpr(i); } //===--- Labels ---===// bool isAsmGoto() const { return NumLabels > 0; } unsigned getNumLabels() const { return NumLabels; } IdentifierInfo *getLabelIdentifier(unsigned i) const { return Names[i + NumOutputs + NumInputs]; } AddrLabelExpr *getLabelExpr(unsigned i) const; StringRef getLabelName(unsigned i) const; using labels_iterator = CastIterator<AddrLabelExpr>; using const_labels_iterator = ConstCastIterator<AddrLabelExpr>; using labels_range = llvm::iterator_range<labels_iterator>; using labels_const_range = llvm::iterator_range<const_labels_iterator>; labels_iterator begin_labels() { return &Exprs[0] + NumOutputs + NumInputs; } labels_iterator end_labels() { return 
&Exprs[0] + NumOutputs + NumInputs + NumLabels; } labels_range labels() { return labels_range(begin_labels(), end_labels()); } const_labels_iterator begin_labels() const { return &Exprs[0] + NumOutputs + NumInputs; } const_labels_iterator end_labels() const { return &Exprs[0] + NumOutputs + NumInputs + NumLabels; } labels_const_range labels() const { return labels_const_range(begin_labels(), end_labels()); } private: void setOutputsAndInputsAndClobbers(const ASTContext &C, IdentifierInfo **Names, StringLiteral **Constraints, Stmt **Exprs, unsigned NumOutputs, unsigned NumInputs, unsigned NumLabels, StringLiteral **Clobbers, unsigned NumClobbers); public: //===--- Other ---===// /// getNamedOperand - Given a symbolic operand reference like %[foo], /// translate this into a numeric value needed to reference the same operand. /// This returns -1 if the operand name is invalid. int getNamedOperand(StringRef SymbolicName) const; StringRef getClobber(unsigned i) const; StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; } const StringLiteral *getClobberStringLiteral(unsigned i) const { return Clobbers[i]; } SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; } SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass; } }; /// This represents a Microsoft inline-assembly statement extension. 
class MSAsmStmt : public AsmStmt { friend class ASTStmtReader; SourceLocation LBraceLoc, EndLoc; StringRef AsmStr; unsigned NumAsmToks = 0; Token *AsmToks = nullptr; StringRef *Constraints = nullptr; StringRef *Clobbers = nullptr; public: MSAsmStmt(const ASTContext &C, SourceLocation asmloc, SourceLocation lbraceloc, bool issimple, bool isvolatile, ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs, ArrayRef<StringRef> constraints, ArrayRef<Expr*> exprs, StringRef asmstr, ArrayRef<StringRef> clobbers, SourceLocation endloc); /// Build an empty MS-style inline-assembly statement. explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {} SourceLocation getLBraceLoc() const { return LBraceLoc; } void setLBraceLoc(SourceLocation L) { LBraceLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } bool hasBraces() const { return LBraceLoc.isValid(); } unsigned getNumAsmToks() { return NumAsmToks; } Token *getAsmToks() { return AsmToks; } //===--- Asm String Analysis ---===// StringRef getAsmString() const { return AsmStr; } /// Assemble final IR asm string. 
std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// StringRef getOutputConstraint(unsigned i) const { assert(i < NumOutputs); return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// StringRef getInputConstraint(unsigned i) const { assert(i < NumInputs); return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getInputExpr(i); } //===--- Other ---===// ArrayRef<StringRef> getAllConstraints() const { return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs); } ArrayRef<StringRef> getClobbers() const { return llvm::makeArrayRef(Clobbers, NumClobbers); } ArrayRef<Expr*> getAllExprs() const { return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs), NumInputs + NumOutputs); } StringRef getClobber(unsigned i) const { return getClobbers()[i]; } private: void initialize(const ASTContext &C, StringRef AsmString, ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints, ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers); public: SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == MSAsmStmtClass; } child_range children() { return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]); } const_child_range children() const { return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]); } }; class SEHExceptStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; SourceLocation Loc; Stmt *Children[2]; enum { FILTER_EXPR, BLOCK }; SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {} public: static SEHExceptStmt* Create(const ASTContext &C, SourceLocation ExceptLoc, Expr *FilterExpr, 
Stmt *Block); SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); } SourceLocation getExceptLoc() const { return Loc; } SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); } Expr *getFilterExpr() const { return reinterpret_cast<Expr*>(Children[FILTER_EXPR]); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Children[BLOCK]); } child_range children() { return child_range(Children, Children+2); } const_child_range children() const { return const_child_range(Children, Children + 2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHExceptStmtClass; } }; class SEHFinallyStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; SourceLocation Loc; Stmt *Block; SEHFinallyStmt(SourceLocation Loc, Stmt *Block); explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {} public: static SEHFinallyStmt* Create(const ASTContext &C, SourceLocation FinallyLoc, Stmt *Block); SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); } SourceLocation getFinallyLoc() const { return Loc; } SourceLocation getEndLoc() const { return Block->getEndLoc(); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); } child_range children() { return child_range(&Block,&Block+1); } const_child_range children() const { return const_child_range(&Block, &Block + 1); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHFinallyStmtClass; } }; class SEHTryStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; bool IsCXXTry; SourceLocation TryLoc; Stmt *Children[2]; enum { TRY = 0, HANDLER = 1 }; SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try' SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {} public: static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry, SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); SourceLocation getBeginLoc() const LLVM_READONLY { 
return getTryLoc(); } SourceLocation getTryLoc() const { return TryLoc; } SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); } bool getIsCXXTry() const { return IsCXXTry; } CompoundStmt* getTryBlock() const { return cast<CompoundStmt>(Children[TRY]); } Stmt *getHandler() const { return Children[HANDLER]; } /// Returns 0 if not defined SEHExceptStmt *getExceptHandler() const; SEHFinallyStmt *getFinallyHandler() const; child_range children() { return child_range(Children, Children+2); } const_child_range children() const { return const_child_range(Children, Children + 2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHTryStmtClass; } }; /// Represents a __leave statement. class SEHLeaveStmt : public Stmt { SourceLocation LeaveLoc; public: explicit SEHLeaveStmt(SourceLocation LL) : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {} /// Build an empty __leave statement. explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {} SourceLocation getLeaveLoc() const { return LeaveLoc; } void setLeaveLoc(SourceLocation L) { LeaveLoc = L; } SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; } SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHLeaveStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// This captures a statement into a function. For example, the following /// pragma annotated compound statement can be represented as a CapturedStmt, /// and this compound statement is the body of an anonymous outlined function. /// @code /// #pragma omp parallel /// { /// compute(); /// } /// @endcode class CapturedStmt : public Stmt { public: /// The different capture forms: by 'this', by reference, capture for /// variable-length array type etc. 
enum VariableCaptureKind { VCK_This, VCK_ByRef, VCK_ByCopy, VCK_VLAType, }; /// Describes the capture of either a variable, or 'this', or /// variable-length array type. class Capture { llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind; SourceLocation Loc; public: friend class ASTStmtReader; /// Create a new capture. /// /// \param Loc The source location associated with this capture. /// /// \param Kind The kind of capture (this, ByRef, ...). /// /// \param Var The variable being captured, or null if capturing this. Capture(SourceLocation Loc, VariableCaptureKind Kind, VarDecl *Var = nullptr); /// Determine the kind of capture. VariableCaptureKind getCaptureKind() const; /// Retrieve the source location at which the variable or 'this' was /// first used. SourceLocation getLocation() const { return Loc; } /// Determine whether this capture handles the C++ 'this' pointer. bool capturesThis() const { return getCaptureKind() == VCK_This; } /// Determine whether this capture handles a variable (by reference). bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; } /// Determine whether this capture handles a variable by copy. bool capturesVariableByCopy() const { return getCaptureKind() == VCK_ByCopy; } /// Determine whether this capture handles a variable-length array /// type. bool capturesVariableArrayType() const { return getCaptureKind() == VCK_VLAType; } /// Retrieve the declaration of the variable being captured. /// /// This operation is only valid if this capture captures a variable. VarDecl *getCapturedVar() const; }; private: /// The number of variable captured, including 'this'. unsigned NumCaptures; /// The pointer part is the implicit the outlined function and the /// int part is the captured region kind, 'CR_Default' etc. llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind; /// The record for captured variables, a RecordDecl or CXXRecordDecl. 
RecordDecl *TheRecordDecl = nullptr; /// Construct a captured statement. CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); /// Construct an empty captured statement. CapturedStmt(EmptyShell Empty, unsigned NumCaptures); Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); } Stmt *const *getStoredStmts() const { return reinterpret_cast<Stmt *const *>(this + 1); } Capture *getStoredCaptures() const; void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; } public: friend class ASTStmtReader; static CapturedStmt *Create(const ASTContext &Context, Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); static CapturedStmt *CreateDeserialized(const ASTContext &Context, unsigned NumCaptures); /// Retrieve the statement being captured. Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; } const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; } /// Retrieve the outlined function declaration. CapturedDecl *getCapturedDecl(); const CapturedDecl *getCapturedDecl() const; /// Set the outlined function declaration. void setCapturedDecl(CapturedDecl *D); /// Retrieve the captured region kind. CapturedRegionKind getCapturedRegionKind() const; /// Set the captured region kind. void setCapturedRegionKind(CapturedRegionKind Kind); /// Retrieve the record declaration for captured variables. const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; } /// Set the record declaration for captured variables. void setCapturedRecordDecl(RecordDecl *D) { assert(D && "null RecordDecl"); TheRecordDecl = D; } /// True if this variable has been captured. bool capturesVariable(const VarDecl *Var) const; /// An iterator that walks over the captures. 
using capture_iterator = Capture *; using const_capture_iterator = const Capture *; using capture_range = llvm::iterator_range<capture_iterator>; using capture_const_range = llvm::iterator_range<const_capture_iterator>; capture_range captures() { return capture_range(capture_begin(), capture_end()); } capture_const_range captures() const { return capture_const_range(capture_begin(), capture_end()); } /// Retrieve an iterator pointing to the first capture. capture_iterator capture_begin() { return getStoredCaptures(); } const_capture_iterator capture_begin() const { return getStoredCaptures(); } /// Retrieve an iterator pointing past the end of the sequence of /// captures. capture_iterator capture_end() const { return getStoredCaptures() + NumCaptures; } /// Retrieve the number of captures, including 'this'. unsigned capture_size() const { return NumCaptures; } /// Iterator that walks over the capture initialization arguments. using capture_init_iterator = Expr **; using capture_init_range = llvm::iterator_range<capture_init_iterator>; /// Const iterator that walks over the capture initialization /// arguments. using const_capture_init_iterator = Expr *const *; using const_capture_init_range = llvm::iterator_range<const_capture_init_iterator>; capture_init_range capture_inits() { return capture_init_range(capture_init_begin(), capture_init_end()); } const_capture_init_range capture_inits() const { return const_capture_init_range(capture_init_begin(), capture_init_end()); } /// Retrieve the first initialization argument. capture_init_iterator capture_init_begin() { return reinterpret_cast<Expr **>(getStoredStmts()); } const_capture_init_iterator capture_init_begin() const { return reinterpret_cast<Expr *const *>(getStoredStmts()); } /// Retrieve the iterator pointing one past the last initialization /// argument. 
capture_init_iterator capture_init_end() { return capture_init_begin() + NumCaptures; } const_capture_init_iterator capture_init_end() const { return capture_init_begin() + NumCaptures; } SourceLocation getBeginLoc() const LLVM_READONLY { return getCapturedStmt()->getBeginLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return getCapturedStmt()->getEndLoc(); } SourceRange getSourceRange() const LLVM_READONLY { return getCapturedStmt()->getSourceRange(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CapturedStmtClass; } child_range children(); const_child_range children() const; }; } // namespace clang #endif // LLVM_CLANG_AST_STMT_H
outofbounds-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
 * DataRaceBench micro-benchmark. The out-of-bounds access below is
 * INTENTIONAL ("orig-yes" = a data race / defect is present by design);
 * race-detection tools are expected to report it. Do not "fix" it.
 *
 * The outermost loop is parallelized, but the inner loop has an out-of-bounds
 * access for b[i][j] when j==0: b[i][j-1] becomes b[i][-1], which due to
 * linearized row-major storage of the 2-D array is the previous row's last
 * element, e.g. b[2][-1] == b[1][3] for a 4x4 array:
 *   j=0 1 2 3
 * i=0 x x x x
 *  1  x x x x
 *  2  x x x x
 *  3  x x x x
 * This creates a loop-carried data dependence between iterations i and i-1,
 * i.e. a data race under the parallel-for below.
 */

#include <stdio.h>

/* Entry point: fills a 100x100 VLA in parallel. Each row's j==0 iteration
 * reads the previous row's last element (see note above), and b is never
 * initialized, so the printed value is indeterminate — both are deliberate. */
int main(int argc, char* argv[])
{
  int i,j;          /* i is the (implicitly private) parallel index */
  int n=100, m=100; /* fixed problem size */
  double b[n][m];   /* VLA, intentionally uninitialized */
#pragma omp parallel for private(j)
  for (i=0;i<n;i++)
    for (j=0;j<m;j++)
      // Note there will be out of bound access when j==0 (reads b[i][-1])
      b[i][j]=b[i][j-1];

  printf ("b[50][50]=%f\n",b[50][50]);
  return 0;
}
SpatialAdaptiveMaxPooling.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialAdaptiveMaxPooling.c"
#else

/* Adaptive max pooling: each output cell (oh, ow) pools over the input
 * window [START_IND, END_IND), so the whole input is covered regardless of
 * the output size. The float round-trip keeps start <= end for all sizes. */
#define START_IND(a,b,c) (int)floor((float)(a * c) / b)
#define END_IND(a,b,c) (int)ceil((float)((a + 1) * c) / b)
// #define START_IND(a,b,c) a * c / b
// #define END_IND(a,b,c) (a + 1) * c / b + ((a + 1) * c % b > 0)?1:0

// 4d tensor B x D x H x W

/* Forward pass over one sample: for every (d, oh, ow) output location, scan
 * the adaptive input window, write the max value to output_p and the
 * flattened (h*isizeW + w) argmax position (+ TH_INDEX_BASE) to ind_p.
 * Parallelized over the feature/plane dimension d. Input is addressed via
 * explicit strides, so it need not be contiguous; output/indices are assumed
 * contiguous (freshly resized by the caller). */
static void THNN_(SpatialAdaptiveMaxPooling_updateOutput_frame)(
          real *input_p,
          real *output_p,
          THIndex_t *ind_p,
          int64_t sizeD,
          int64_t isizeH,
          int64_t isizeW,
          int64_t osizeH,
          int64_t osizeW,
          int64_t istrideD,
          int64_t istrideH,
          int64_t istrideW)
{
  int64_t d;
#pragma omp parallel for private(d)
  for (d = 0; d < sizeD; d++)
  {
    /* loop over output */
    int64_t oh, ow;
    for(oh = 0; oh < osizeH; oh++)
    {
      int istartH = START_IND(oh, osizeH, isizeH);
      int iendH   = END_IND(oh, osizeH, isizeH);
      int kH = iendH - istartH;   /* window height for this output row */

      for(ow = 0; ow < osizeW; ow++)
      {
        int istartW = START_IND(ow, osizeW, isizeW);
        int iendW   = END_IND(ow, osizeW, isizeW);
        int kW = iendW - istartW; /* window width for this output col */

        /* local pointers: strided into input, contiguous into output/indices */
        real *ip = input_p   + d*istrideD + istartH*istrideH + istartW*istrideW;
        real *op = output_p  + d*osizeH*osizeW + oh*osizeW + ow;
        THIndex_t *indp = ind_p   + d*osizeH*osizeW + oh*osizeW + ow;

        /* compute local max: */
        int64_t maxindex = -1;
        real maxval = -FLT_MAX;
        int64_t tcntr = 0;  /* NOTE(review): unused; kept for parity with other THNN kernels */
        int ih, iw;
        for(ih = 0; ih < kH; ih++)
        {
          for(iw = 0; iw < kW; iw++)
          {
            real val = *(ip + ih*istrideH + iw*istrideW);
            if (val > maxval)
            {
              maxval = val;
              /* argmax stored as flattened index into the (isizeH x isizeW) plane */
              maxindex = (ih+istartH)*isizeW + (iw+istartW);
            }
          }
        }

        /* set output to local max */
        *op = maxval;

        /* store location of max */
        *indp = maxindex + TH_INDEX_BASE;
      }
    }
  }
}

/* Public forward entry point. Accepts a 3D (D x H x W) or 4D (B x D x H x W)
 * input, resizes output/indices to the requested osizeH x osizeW spatial
 * size, and dispatches one frame per batch element (batch loop parallelized).
 * osizeW/osizeH are the desired output width/height. */
void THNN_(SpatialAdaptiveMaxPooling_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          THIndexTensor *indices,
          int osizeW,
          int osizeH)
{
  int dimW = 2;          /* width axis (shifted by 1 in batch mode) */
  int dimH = 1;          /* height axis */
  int64_t sizeB = 1;     /* batch size; stays 1 for 3D input */
  int64_t sizeD;
  int64_t isizeH;
  int64_t isizeW;

  int64_t istrideD;
  int64_t istrideH;
  int64_t istrideW;
  int64_t istrideB;      /* only set/used in the 4D branch */

  real *input_data;
  real *output_data;
  THIndex_t *indices_data;

  THNN_ARGCHECK(input->nDimension == 3 || input->nDimension == 4, 2, input,
                "3D or 4D (batch mode) tensor expected for input, but got: %s");

  if (input->nDimension == 4)
  {
    istrideB = input->stride[0];
    sizeB = input->size[0];
    dimW++;
    dimH++;
  }

  /* sizes */
  sizeD  = input->size[dimH-1];
  isizeH = input->size[dimH];
  isizeW = input->size[dimW];
  /* strides */
  istrideD = input->stride[dimH-1];
  istrideH = input->stride[dimH];
  istrideW = input->stride[dimW];

  /* resize output */
  if (input->nDimension == 3)
  {
    THTensor_(resize3d)(output, sizeD, osizeH, osizeW);
    /* indices will contain i,j locations for each output point */
    THIndexTensor_(resize3d)(indices, sizeD, osizeH, osizeW);

    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);
    indices_data = THIndexTensor_(data)(indices);

    THNN_(SpatialAdaptiveMaxPooling_updateOutput_frame)(input_data, output_data,
                                                      indices_data,
                                                      sizeD,
                                                      isizeH, isizeW,
                                                      osizeH, osizeW,
                                                      istrideD,
                                                      istrideH, istrideW);
  }
  else
  {
    int64_t b;

    THTensor_(resize4d)(output, sizeB, sizeD, osizeH, osizeW);
    /* indices will contain i,j locations for each output point */
    THIndexTensor_(resize4d)(indices, sizeB, sizeD, osizeH, osizeW);

    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);
    indices_data = THIndexTensor_(data)(indices);

    /* one frame per batch element; input advances by its batch stride,
     * output/indices by their contiguous per-sample extents */
#pragma omp parallel for private(b)
    for (b = 0; b < sizeB; b++)
    {
      THNN_(SpatialAdaptiveMaxPooling_updateOutput_frame)(input_data+b*istrideB, output_data+b*sizeD*osizeH*osizeW,
                                                        indices_data+b*sizeD*osizeH*osizeW,
                                                        sizeD,
                                                        isizeH, isizeW,
                                                        osizeH, osizeW,
                                                        istrideD,
                                                        istrideH, istrideW);
    }
  }
}

/* Backward pass over one sample: scatter each output gradient back to the
 * input position recorded in ind_p during the forward pass. Parallel over d;
 * different d planes write disjoint gradInput regions, so no race. All three
 * buffers are assumed contiguous here. */
static void THNN_(SpatialAdaptiveMaxPooling_updateGradInput_frame)(
          real *gradInput_p,
          real *gradOutput_p,
          THIndex_t *ind_p,
          int64_t sizeD,
          int64_t isizeH,
          int64_t isizeW,
          int64_t osizeH,
          int64_t osizeW)
{
  int64_t d;
#pragma omp parallel for private(d)
  for (d = 0; d < sizeD; d++)
  {
    real *gradInput_p_d = gradInput_p + d*isizeH*isizeW;
    real *gradOutput_p_d = gradOutput_p + d*osizeH*osizeW;
    THIndex_t *ind_p_d = ind_p + d*osizeH*osizeW;

    /* calculate max points */
    int64_t oh, ow;
    for(oh = 0; oh < osizeH; oh++)
    {
      for(ow = 0; ow < osizeW; ow++)
      {
        /* retrieve position of max (undo the TH_INDEX_BASE offset) */
        int64_t maxp = ind_p_d[oh*osizeW + ow] - TH_INDEX_BASE;

        /* update gradient; += because adaptive windows can overlap */
        gradInput_p_d[maxp] += gradOutput_p_d[oh*osizeW + ow];
      }
    }
  }
}

/* Public backward entry point. Zeroes gradInput (resized to match input) and
 * routes gradOutput through the saved argmax indices.
 * NOTE(review): gradOutput is made contiguous but `indices` is not — this
 * assumes indices are still contiguous from the forward pass; confirm.
 * NOTE(review): sizes here are plain `int`, unlike int64_t in updateOutput. */
void THNN_(SpatialAdaptiveMaxPooling_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          THIndexTensor *indices)
{
  int dimW = 2;
  int dimH = 1;
  int64_t sizeB = 1;
  int sizeD;
  int isizeH;
  int isizeW;
  int osizeH;
  int osizeW;
  real *gradInput_data;
  real *gradOutput_data;
  THIndex_t *indices_data;

  /* get contiguous gradOutput */
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* resize */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  if (input->nDimension == 4) {
    sizeB = input->size[0];
    dimW++;
    dimH++;
  }

  /* sizes */
  sizeD  = input->size[dimH-1];
  isizeH = input->size[dimH];
  isizeW = input->size[dimW];
  osizeH = gradOutput->size[dimH];
  osizeW = gradOutput->size[dimW];

  /* get raw pointers */
  gradInput_data = THTensor_(data)(gradInput);
  gradOutput_data = THTensor_(data)(gradOutput);
  indices_data = THIndexTensor_(data)(indices);

  /* backprop */
  if (input->nDimension == 3)
  {
    THNN_(SpatialAdaptiveMaxPooling_updateGradInput_frame)(gradInput_data, gradOutput_data,
                                                         indices_data,
                                                         sizeD,
                                                         isizeH, isizeW,
                                                         osizeH, osizeW);
  }
  else
  {
    int64_t b;
#pragma omp parallel for private(b)
    for (b = 0; b < sizeB; b++)
    {
      THNN_(SpatialAdaptiveMaxPooling_updateGradInput_frame)(gradInput_data+b*sizeD*isizeH*isizeW, gradOutput_data+b*sizeD*osizeH*osizeW,
                                                           indices_data+b*sizeD*osizeH*osizeW,
                                                           sizeD,
                                                           isizeH, isizeW,
                                                           osizeH, osizeW);
    }
  }

  /* cleanup: release the contiguous copy made above */
  THTensor_(free)(gradOutput);
}

#endif
GB_iso_expand.c
//------------------------------------------------------------------------------ // GB_iso_expand: expand a scalar into an entire array //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #include "GB.h" void GB_iso_expand // expand an iso scalar into an entire array ( void *restrict X, // output array to expand into int64_t n, // # of entries in X void *restrict scalar, // scalar to expand into X size_t size, // size of the scalar and each entry of X GB_Context Context ) { //-------------------------------------------------------------------------- // determine how many threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (n, chunk, nthreads_max) ; //-------------------------------------------------------------------------- // copy the value into X //-------------------------------------------------------------------------- int64_t p ; switch (size) { case GB_1BYTE : // bool, uint8, int8, and UDT of size 1 { uint8_t a0 = (*((uint8_t *) scalar)) ; uint8_t *restrict Z = (uint8_t *) X ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < n ; p++) { Z [p] = a0 ; } } break ; case GB_2BYTE : // uint16, int16, and UDT of size 2 { uint16_t a0 = (*((uint16_t *) scalar)) ; uint16_t *restrict Z = (uint16_t *) X ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < n ; p++) { Z [p] = a0 ; } } break ; case GB_4BYTE : // uint32, int32, float, and UDT of size 4 { uint32_t a0 = (*((uint32_t *) scalar)) ; uint32_t *restrict Z = (uint32_t *) X ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < n ; p++) { Z [p] = a0 ; } } break ; case GB_8BYTE : // 
uint64, int64, double, float complex, UDT size 8 { uint64_t a0 = (*((uint64_t *) scalar)) ; uint64_t *restrict Z = (uint64_t *) X ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < n ; p++) { Z [p] = a0 ; } } break ; case GB_16BYTE : // double complex, and UDT size 16 { GB_blob16 a0 = (*((GB_blob16 *) scalar)) ; GB_blob16 *restrict Z = (GB_blob16 *) X ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < n ; p++) { Z [p] = a0 ; } // uint64_t *restrict a = (uint64_t *) scalar ; // uint64_t *restrict Z = (uint64_t *) X ; // #pragma omp parallel for num_threads(nthreads) schedule(static) // for (p = 0 ; p < n ; p++) // { // Z [2*p ] = a [0] ; // Z [2*p+1] = a [1] ; // } } break ; default : // user-defined types of arbitrary size { GB_void *restrict Z = (GB_void *) X ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < n ; p++) { memcpy (Z + p*size, scalar, size) ; } } break ; } }
join.c
/* Copyright 2013-2015. The Regents of the University of California. * Copyright 2015. Martin Uecker. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: * 2013, 2015 Martin Uecker <martin.uecker@med.uni-goettingen.de> * 2015 Jonathan Tamir <jtamir@eecs.berkeley.edu> */ #include <stdbool.h> #include <complex.h> #include <string.h> #include <unistd.h> #include "num/multind.h" #include "num/init.h" #include "misc/mmio.h" #include "misc/debug.h" #include "misc/misc.h" #include "misc/opts.h" #include "misc/io.h" #ifndef DIMS #define DIMS 16 #endif #ifndef CFL_SIZE #define CFL_SIZE sizeof(complex float) #endif static const char usage_str[] = "dimension <input1> ... <inputn> <output>"; static const char help_str[] = "Join input files along {dimensions}. All other dimensions must have the same size.\n" "\t Example 1: join 0 slice_001 slice_002 slice_003 full_data\n" "\t Example 2: join 0 `seq -f \"slice_%%03g\" 0 255` full_data\n"; int main_join(int argc, char* argv[argc]) { bool append = false; const struct opt_s opts[] = { OPT_SET('a', &append, "append - only works for cfl files!"), }; cmdline(&argc, argv, 3, 10000, usage_str, help_str, ARRAY_SIZE(opts), opts); num_init(); int N = DIMS; int dim = atoi(argv[1]); assert(dim < N); int count = argc - 3; if (append) { count += 1; assert(count > 1); int len = strlen(argv[argc - 1]); char buf[len + 5]; strcpy(buf, argv[argc - 1]); strcat(buf, ".cfl"); if (-1 == access(buf, F_OK)) { // make sure we do not have any other file format strcpy(buf, argv[argc - 1]); strcat(buf, ".coo"); assert(-1 == access(buf, F_OK)); strcpy(buf, argv[argc - 1]); strcat(buf, ".ra"); assert(-1 == access(buf, F_OK)); count--; append = false; } } long in_dims[count][N]; long offsets[count]; complex float* idata[count]; long sum = 0; // figure out size of output for (int l = 0, i = 0; i < count; i++) { const char* name = NULL; if (append && (i == 0)) { name = argv[argc 
- 1]; } else { name = argv[2 + l++]; } debug_printf(DP_DEBUG1, "loading %s\n", name); idata[i] = load_cfl(name, N, in_dims[i]); offsets[i] = sum; sum += in_dims[i][dim]; for (int j = 0; j < N; j++) assert((dim == j) || (in_dims[0][j] == in_dims[i][j])); if (append && (i == 0)) unmap_cfl(N, in_dims[i], idata[i]); } long out_dims[N]; for (int i = 0; i < N; i++) out_dims[i] = in_dims[0][i]; out_dims[dim] = sum; if (append) { // Here, we need to trick the IO subsystem into absolutely NOT // unlinking our input, as the same file is also an output here. io_unregister(argv[argc - 1]); } complex float* out_data = create_cfl(argv[argc - 1], N, out_dims); long ostr[N]; md_calc_strides(N, ostr, out_dims, CFL_SIZE); #pragma omp parallel for for (int i = 0; i < count; i++) { if (!(append && (0 == i))) { long pos[N]; md_singleton_strides(N, pos); pos[dim] = offsets[i]; long istr[N]; md_calc_strides(N, istr, in_dims[i], CFL_SIZE); md_copy_block(N, pos, out_dims, out_data, in_dims[i], idata[i], CFL_SIZE); unmap_cfl(N, in_dims[i], idata[i]); debug_printf(DP_DEBUG1, "done copying file %d\n", i); } } unmap_cfl(N, out_dims, out_data); return 0; }
expected_output.c
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <polybench.h>
#include "3mm.h"
/**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*
* NOTE: this is PolyBench-generated code (note the stray `;` statements and
* the argc>42 dead-code-elimination guard); keep the generated structure.
*/
/*3mm.c: this file is part of PolyBench/C*/
/*Include polybench common header.*/
/*Include benchmark-specific header.*/

/*Array initialization: fills A, B, C, D with deterministic pseudo-random
  values derived from the indices and problem sizes, so runs are repeatable. */
static void init_array(int ni, int nj, int nk, int nl, int nm, double A[800][1000], double B[1000][900], double C[900][1200], double D[1200][1100])
{
    int i, j;
    for(i = 0; i < ni; i++)
        for(j = 0; j < nk; j++)
            A[i][j] = (double) ((i * j + 1) % ni) / (5 * ni);
    for(i = 0; i < nk; i++)
        for(j = 0; j < nj; j++)
            B[i][j] = (double) ((i * (j + 1) + 2) % nj) / (5 * nj);
    for(i = 0; i < nj; i++)
        for(j = 0; j < nm; j++)
            C[i][j] = (double) (i * (j + 3) % nl) / (5 * nl);
    for(i = 0; i < nm; i++)
        for(j = 0; j < nl; j++)
            D[i][j] = (double) ((i * (j + 2) + 2) % nk) / (5 * nk);
}

/*DCE code. Must scan the entire live-out data.
  Can be used also to check the correctness of the output.
  NOTE(review): the line-break stride uses ni rather than nl; this matches
  upstream PolyBench output formatting — leave as-is. */
static void print_array(int ni, int nl, double G[800][1100])
{
    int i, j;
    fprintf(stderr, "==BEGIN DUMP_ARRAYS==\n");
    fprintf(stderr, "begin dump: %s", "G");
    for(i = 0; i < ni; i++)
        for(j = 0; j < nl; j++) {
            if((i * ni + j) % 20 == 0)
                fprintf(stderr, "\n");
            fprintf(stderr, "%0.2lf ", G[i][j]);
        }
    fprintf(stderr, "\nend dump: %s\n", "G");
    fprintf(stderr, "==END DUMP_ARRAYS==\n");
}

/*Main computational kernel. The whole function will be timed,
  including the call and return.
  Computes G = (A*B) * (C*D): E = A*B, F = C*D, G = E*F.
  Only the outermost i-loops are parallelized; the inner-loop pragmas were
  deliberately left commented out (nested parallelism was not profitable). */
static void kernel_3mm(int ni, int nj, int nk, int nl, int nm, double E[800][900], double A[800][1000], double B[1000][900], double F[900][1100], double C[900][1200], double D[1200][1100], double G[800][1100])
{
    int i, j, k;
    /* E := A*B */
#pragma omp parallel for default(shared) private(i, j, k) firstprivate(ni, nj, nk, A, B)
    for(i = 0; i < ni; i++) {
        // #pragma omp parallel for default(shared) private(j, k) firstprivate(nj, i, nk, A, B)
        for(j = 0; j < nj; j++) {
            E[i][j] = 0.0;
            // #pragma omp parallel for default(shared) private(k) firstprivate(nk, i, j, A, B) reduction(+ : E[i][j])
            for(k = 0; k < nk; ++k)
                E[i][j] += A[i][k] * B[k][j];
        }
    }
    /* F := C*D */
#pragma omp parallel for default(shared) private(i, j, k) firstprivate(nj, nl, nm, C, D)
    for(i = 0; i < nj; i++) {
        // #pragma omp parallel for default(shared) private(j, k) firstprivate(nl, i, nm, C, D)
        for(j = 0; j < nl; j++) {
            F[i][j] = 0.0;
            // #pragma omp parallel for default(shared) private(k) firstprivate(nm, i, j, C, D) reduction(+ : F[i][j])
            for(k = 0; k < nm; ++k)
                F[i][j] += C[i][k] * D[k][j];
        }
    }
    /* G := E*F */
#pragma omp parallel for default(shared) private(i, j, k) firstprivate(ni, nl, nj, E, F)
    for(i = 0; i < ni; i++) {
        // #pragma omp parallel for default(shared) private(j, k) firstprivate(nl, i, nj, E, F)
        for(j = 0; j < nl; j++) {
            G[i][j] = 0.0;
            // #pragma omp parallel for default(shared) private(k) firstprivate(nj, i, j, E, F) reduction(+ : G[i][j])
            for(k = 0; k < nj; ++k)
                G[i][j] += E[i][k] * F[k][j];
        }
    }
}

/* Benchmark driver: allocate, initialize, run the timed kernel, optionally
 * dump the live-out array (guard is intentionally never-true at runtime but
 * prevents the compiler from eliminating the computation), then free. */
int main(int argc, char **argv)
{
    /*Retrieve problem size.*/
    int ni = 800;
    int nj = 900;
    int nk = 1000;
    int nl = 1100;
    int nm = 1200;
    /*Variable declaration/allocation.*/
    double (*E)[800][900];
    E = (double (*)[800][900]) polybench_alloc_data((800 + 0) * (900 + 0), sizeof(double));
    ;
    double (*A)[800][1000];
    A = (double (*)[800][1000]) polybench_alloc_data((800 + 0) * (1000 + 0), sizeof(double));
    ;
    double (*B)[1000][900];
    B = (double (*)[1000][900]) polybench_alloc_data((1000 + 0) * (900 + 0), sizeof(double));
    ;
    double (*F)[900][1100];
    F = (double (*)[900][1100]) polybench_alloc_data((900 + 0) * (1100 + 0), sizeof(double));
    ;
    double (*C)[900][1200];
    C = (double (*)[900][1200]) polybench_alloc_data((900 + 0) * (1200 + 0), sizeof(double));
    ;
    double (*D)[1200][1100];
    D = (double (*)[1200][1100]) polybench_alloc_data((1200 + 0) * (1100 + 0), sizeof(double));
    ;
    double (*G)[800][1100];
    G = (double (*)[800][1100]) polybench_alloc_data((800 + 0) * (1100 + 0), sizeof(double));
    ;
    /*Initialize array(s).*/
    init_array(ni, nj, nk, nl, nm, *A, *B, *C, *D);
    /*Start timer.*/
    ;
    /*Run kernel.*/
    kernel_3mm(ni, nj, nk, nl, nm, *E, *A, *B, *F, *C, *D, *G);
    /*Stop and print timer.*/
    ;
    ;
    /*Prevent dead-code elimination. All live-out data must be printed
      by the function call in argument.*/
    if(argc > 42 && !strcmp(argv[0], ""))
        print_array(ni, nl, *G);
    /*Be clean.*/
    free((void *) E);
    ;
    free((void *) A);
    ;
    free((void *) B);
    ;
    free((void *) F);
    ;
    free((void *) C);
    ;
    free((void *) D);
    ;
    free((void *) G);
    ;
    return 0;
}
DiffusionMASK_core.c
/*
 * This work is part of the Core Imaging Library developed by
 * Visual Analytics and Imaging System Group of the Science Technology
 * Facilities Council, STFC
 *
 * Copyright 2017 Daniil Kazantsev
 * Copyright 2017 Srikanth Nagella, Edoardo Pasca
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "DiffusionMASK_core.h"
#include "utils.h"

#define EPS 1.0e-5
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))

/* sign function: returns +1, 0 or -1 for positive, zero or negative x */
int signNDF_m(float x) {
    return (x > 0) - (x < 0);
}

/* C-OMP implementation of linear and nonlinear diffusion [1,2] which is constrained by the provided MASK.
 * The minimisation is performed using the explicit scheme.
 * Implementation using the Diffusivity window to increase the coverage area of the diffusivity
 *
 * Input Parameters:
 * 1. Noisy image/volume
 * 2. MASK (in unsigned short format)
 * 3. Diffusivity window (half-size of the searching window, e.g. 1)
 * 4. lambda - regularisation parameter (a constant or the same size as the input (1))
 * 5. Edge-preserving parameter (sigma), when sigma equals to zero nonlinear diffusion -> linear diffusion
 * 6. Number of iterations, for explicit scheme >= 150 is recommended
 * 7. tau - time-marching step for explicit scheme
 * 8. Penalty type: 1 - Huber, 2 - Perona-Malik, 3 - Tukey Biweight
 * 9. epsilon - tolerance constant
 * Output:
 * [1] Filtered/regularized image/volume
 * [2] Information vector which contains [iteration no., reached tolerance]
 *
 * This function is based on the paper by
 * [1] Perona, P. and Malik, J., 1990. Scale-space and edge detection using anisotropic diffusion. IEEE Transactions on pattern analysis and machine intelligence, 12(7), pp.629-639.
 * [2] Black, M.J., Sapiro, G., Marimont, D.H. and Heeger, D., 1998. Robust anisotropic diffusion. IEEE Transactions on image processing, 7(3), pp.421-432.
 */
float DiffusionMASK_CPU_main(float *Input, unsigned char *MASK, float *Output, float *infovector, int DiffusWindow, float *lambdaPar, int lambda_is_arr, float sigmaPar, int iterationsNumb, float tau, int penaltytype, float epsil, int dimX, int dimY, int dimZ)
{
    long i,j,k;
    int counterG;
    float sigmaPar2, *Output_prev=NULL, *Eucl_Vec;  /* Output_prev only allocated when epsil != 0 */
    int DiffusWindow_tot;
    sigmaPar2 = sigmaPar/sqrt(2.0f);
    long DimTotal;
    float re, re1;          /* residual norm accumulators for the stopping test */
    re = 0.0f; re1 = 0.0f;
    int count = 0;          /* # of consecutive checks below tolerance */
    DimTotal = (long)(dimX*dimY*dimZ);

    /* Euclidean (Gaussian) weights for the diffusivity window.
     * NOTE(review): Eucl_Vec is passed to the 2D kernels below but never
     * read there — the Gaussian weighting appears to be unused; confirm
     * whether it was meant to scale diffVal/funcVal. */
    if (dimZ == 1) {
    DiffusWindow_tot = (2*DiffusWindow + 1)*(2*DiffusWindow + 1);
    /* generate a 2D Gaussian kernel for NLM procedure */
    Eucl_Vec = (float*) calloc (DiffusWindow_tot,sizeof(float));
    counterG = 0;
    for(i=-DiffusWindow; i<=DiffusWindow; i++) {
        for(j=-DiffusWindow; j<=DiffusWindow; j++) {
            Eucl_Vec[counterG] = (float)expf(-(powf(((float) i), 2) + powf(((float) j), 2))/(2.0f*DiffusWindow*DiffusWindow));
            counterG++;
        }} /*main neighb loop */
    }
    else {
    /* 3D Gaussian kernel (cubic window) */
    DiffusWindow_tot = (2*DiffusWindow + 1)*(2*DiffusWindow + 1)*(2*DiffusWindow + 1);
    Eucl_Vec = (float*) calloc (DiffusWindow_tot,sizeof(float));
    counterG = 0;
    for(i=-DiffusWindow; i<=DiffusWindow; i++) {
        for(j=-DiffusWindow; j<=DiffusWindow; j++) {
            for(k=-DiffusWindow; k<=DiffusWindow; k++) {
                Eucl_Vec[counterG] = (float)expf(-(powf(((float) i), 2) + powf(((float) j), 2) + powf(((float) k), 2))/(2*DiffusWindow*DiffusWindow*DiffusWindow));
                counterG++;
            }}} /*main neighb loop */
    }

    if (epsil != 0.0f) Output_prev = calloc(DimTotal, sizeof(float));
    /* copy input into output */
    copyIm(Input, Output, (long)(dimX), (long)(dimY), (long)(dimZ));

    /* Start diffusivity iterations using MASK */
    for(i=0; i < iterationsNumb; i++) {
      /* snapshot for the residual check every 5th iteration */
      if ((epsil != 0.0f)  && (i % 5 == 0)) copyIm(Output, Output_prev, (long)(dimX), (long)(dimY), (long)(dimZ));
      if (dimZ == 1) {
        /* running 2D diffusion iterations */
        if (sigmaPar == 0.0f) LinearDiff_MASK2D(Input, MASK, Output, Eucl_Vec, DiffusWindow, lambdaPar, lambda_is_arr, tau, (long)(dimX), (long)(dimY)); /* constrained linear diffusion */
        else NonLinearDiff_MASK2D(Input, MASK, Output, Eucl_Vec, DiffusWindow, lambdaPar, lambda_is_arr, sigmaPar2, tau, penaltytype, (long)(dimX), (long)(dimY)); /* constrained nonlinear diffusion */
      }
      else {
        /* running 3D diffusion iterations
         * NOTE(review): the 3D branch is currently a no-op — the calls are
         * commented out, so 3D inputs are returned unchanged. */
        //if (sigmaPar == 0.0f) LinearDiff3D(Input, Output, lambdaPar, tau, (long)(dimX), (long)(dimY), (long)(dimZ));
        // else NonLinearDiff3D(Input, Output, lambdaPar, sigmaPar2, tau, penaltytype, (long)(dimX), (long)(dimY), (long)(dimZ));
      }
      /* check early stopping criteria if epsilon not equal zero */
      if ((epsil != 0.0f)  && (i % 5 == 0)) {
        re = 0.0f; re1 = 0.0f;
        for(j=0; j<DimTotal; j++) {
            re += powf(Output[j] - Output_prev[j],2);
            re1 += powf(Output[j],2);
        }
        re = sqrtf(re)/sqrtf(re1);  /* relative change; NaN if Output is all-zero */
        /* stop if the norm residual is less than the tolerance EPS */
        if (re < epsil)  count++;
        if (count > 3) break;
      }
    }

    free(Output_prev);  /* free(NULL) is a no-op when epsil == 0 */
    free(Eucl_Vec);
    /*adding info into info_vector */
    infovector[0] = (float)(i);  /*iterations number (if stopped earlier based on tolerance)*/
    infovector[1] = re;  /* reached tolerance */
    return 0;
}

/********************************************************************/
/***************************2D Functions*****************************/
/********************************************************************/
/* MASKED-constrained 2D linear diffusion (PDE heat equation).
 * One explicit-scheme time step: for each pixel, sums unweighted differences
 * to all same-class neighbours in the (2*DiffusWindow+1)^2 window and steps
 * Output by tau*(lambda*diffusion - fidelity). Writes are race-free: each
 * thread updates only its own pixels.
 * NOTE(review): Eucl_Vec and counter are unused here (see note above). */
float LinearDiff_MASK2D(float *Input, unsigned char *MASK, float *Output, float *Eucl_Vec, int DiffusWindow, float *lambdaPar, int lambda_is_arr, float tau, long dimX, long dimY)
{
long i,j,i1,j1,i_m,j_m,index,indexneighb,counter;
unsigned char class_c, class_n;
float diffVal, lambda_val;

#pragma omp parallel for shared(Input) private(index,i,j,i1,j1,i_m,j_m,counter,diffVal,indexneighb,class_c,class_n,lambda_val)
for(i=0; i<dimX; i++) {
    for(j=0; j<dimY; j++) {
        index = j*dimX+i; /* current pixel index */
        /* lambda_is_arr acts as a 0/1 multiplier: 0 -> scalar lambda, 1 -> per-pixel */
        lambda_val = *(lambdaPar + index* lambda_is_arr);
        counter = 0; diffVal = 0.0f;
        for(i_m=-DiffusWindow; i_m<=DiffusWindow; i_m++) {
            for(j_m=-DiffusWindow; j_m<=DiffusWindow; j_m++) {
                i1 = i+i_m;
                j1 = j+j_m;
                if (((i1 >= 0) && (i1 < dimX)) && ((j1 >= 0) && (j1 < dimY))) {
                    indexneighb = j1*dimX+i1; /* neighbour pixel index */
                    class_c = MASK[index]; /* current class value */
                    class_n = MASK[indexneighb]; /* neighbour class value */
                    /* perform diffusion only within the same class (given by MASK) */
                    if (class_n == class_c) diffVal += Output[indexneighb] - Output[index];
                }
                counter++;
            }}
        /* explicit update: diffusion term + data-fidelity term */
        Output[index] += tau*(lambda_val*(diffVal) - (Output[index] - Input[index]));
    }}
return *Output;
}

/* MASKED-constrained 2D nonlinear diffusion.
 * Same structure as the linear version, but each same-class difference is
 * passed through an edge-stopping function selected by penaltytype
 * (1: Huber, 2: Perona-Malik, 3: Tukey biweight).
 * NOTE(review): the `break` for an invalid penaltytype exits only the
 * innermost j_m loop, so the warning prints repeatedly. */
float NonLinearDiff_MASK2D(float *Input, unsigned char *MASK, float *Output, float *Eucl_Vec, int DiffusWindow, float *lambdaPar, int lambda_is_arr, float sigmaPar, float tau, int penaltytype, long dimX, long dimY)
{
long i,j,i1,j1,i_m,j_m,index,indexneighb,counter;
unsigned char class_c, class_n;
float diffVal, funcVal, lambda_val;

#pragma omp parallel for shared(Input) private(index,i,j,i1,j1,i_m,j_m,counter,diffVal,funcVal,indexneighb,class_c,class_n,lambda_val)
for(i=0; i<dimX; i++) {
    for(j=0; j<dimY; j++) {
        index = j*dimX+i; /* current pixel index */
        lambda_val = *(lambdaPar + index* lambda_is_arr);
        counter = 0; diffVal = 0.0f; funcVal = 0.0f;
        for(i_m=-DiffusWindow; i_m<=DiffusWindow; i_m++) {
            for(j_m=-DiffusWindow; j_m<=DiffusWindow; j_m++) {
                i1 = i+i_m;
                j1 = j+j_m;
                if (((i1 >= 0) && (i1 < dimX)) && ((j1 >= 0) && (j1 < dimY))) {
                    indexneighb = j1*dimX+i1; /* neighbour pixel index */
                    class_c = MASK[index]; /* current class value */
                    class_n = MASK[indexneighb]; /* neighbour class value */
                    /* perform diffusion only within the same class (given by MASK) */
                    if (class_n == class_c) {
                        diffVal = Output[indexneighb] - Output[index];
                        if (penaltytype == 1) {
                            /* Huber penalty */
                            if (fabs(diffVal) > sigmaPar) funcVal += signNDF_m(diffVal);
                            else funcVal += diffVal/sigmaPar;
                        }
                        else if (penaltytype == 2) {
                            /* Perona-Malik */
                            funcVal += (diffVal)/(1.0f + powf((diffVal/sigmaPar),2));
                        }
                        else if (penaltytype == 3) {
                            /* Tukey Biweight: zero response beyond sigma */
                            if (fabs(diffVal) <= sigmaPar) funcVal += diffVal*powf((1.0f - powf((diffVal/sigmaPar),2)), 2);
                        }
                        else {
                            printf("%s \n", "No penalty function selected! Use Huber,2 or 3.");
                            break;
                        }
                    }
                }
                counter++;
            }}
        /* explicit update: edge-weighted diffusion + data-fidelity term */
        Output[index] += tau*(lambda_val*(funcVal) - (Output[index] - Input[index]));
    }}
return *Output;
}
/********************************************************************/
/***************************3D Functions*****************************/
/********************************************************************/
MobileNet_CPU_cifar.c
/*
Pretrained MobileNet Convolutional Neural Network in C language and OpenMP API
GitHUB Page: https://github.com/jcanore/vgg16
Author: ZFTurbo/jocare

Compilation: gcc -O3 MobileNet_CPU_cifar.c -lm -fopenmp -o MobileNet_CPU_cifar
Usage: MobileNet_CPU_cifar <weights_path> <file_with_list_of_images> <output file> <output convolution features (optional)>
Example: MobileNet_CPU_cifar ../../weights/weights.txt" ../../img/image_list.txt results_imagenet_conv.txt 1
*/

#include <ctype.h>
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <time.h>

#include "sparse.h"

/* Elapsed wall-clock time between two gettimeofday() samples, in seconds. */
double get_seconds(struct timeval tStart, struct timeval tEnd) {
	return ((tEnd.tv_sec - tStart.tv_sec) * 1000000 + tEnd.tv_usec - tStart.tv_usec) / 1.e6;
}

#define SIZE 32
#define CONV_SIZE 3
#define CONV_LEVELS 27
//#define _CRT_SECURE_NO_WARNINGS 1

// precompile variables
// assure default values if nothing provided
#ifndef SPARSE_CONVOLUTIONS
#define SPARSE_CONVOLUTIONS 0   // default dense convolutions
#endif // SPARSE_CONVOLUTIONS

#ifndef FIRST_CONV_SPARSE
#define FIRST_CONV_SPARSE 0     // this is almost never 1
#endif // FIRST_CONV_SPARSE

#ifndef SPARSE_FULLY_CONNECTED
#define SPARSE_FULLY_CONNECTED 0 // this is not implemented yet
#endif // SPARSE_FULLY_CONNECTED

#ifndef FISHER_PRUNING
#define FISHER_PRUNING 0        // set for fisher pruning, all previous variables changed to dense
#endif // FISHER_PRUNING

#ifndef NUMBER_OF_THREADS
#define NUMBER_OF_THREADS 1     // number of threads to run on
//#define NUMBER_OF_THREADS omp_get_num_procs() - 1
#endif // NUMBER_OF_THREADS

/****************************************************************************************************************************/

// Feature-map side length at the input of each of the 27 conv levels.
int im_sizes[27] = { 32, 32, 16, 16, 16, 16, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2 };
// Convolution stride per level transition (26 transitions between 27 levels).
int strides[26] = { 1, 2, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1 };
int mem_block_shape[3] = { 1024, 32, 32 };
// allocate the absolute maximum amount of
space we will need float ***block1; float ***block2; float *****wc; // weights convolution float ***wd; // weights dense float **bd; // biases dense float **batchnorm_weights; float **batchnorm_biases; float **batchnorm_means; // running mean and variance from training used to estimate population statistics float **batchnorm_vars; int mem_block_dense_shape = { 1024 * 2 * 2 }; // size of output from last convolutional layer float *mem_block1_dense; float *mem_block2_dense; #if SPARSE_CONVOLUTIONS // sparse conv csr_t ****wc_sparse; #endif // SPARSE_CONVOLUTIONS #if FISHER_PRUNING #define SPARSE_CONVOLUTIONS 0 // force dense convolutions /* // ORIGINAL FISHER EXPERIMENTS int cshape[27][4] = { { 32, 3, CONV_SIZE, CONV_SIZE }, { 32, 1, CONV_SIZE, CONV_SIZE }, { 43, 32, 1, 1 }, { 43, 1, CONV_SIZE, CONV_SIZE }, { 85, 43, 1, 1 }, { 85, 1, CONV_SIZE, CONV_SIZE }, { 70, 85, 1, 1 }, { 70, 1, CONV_SIZE, CONV_SIZE }, { 150, 70, 1, 1 }, { 150, 1, CONV_SIZE, CONV_SIZE }, { 69, 150, 1, 1 }, { 69, 1, CONV_SIZE, CONV_SIZE }, { 188, 69, 1, 1 }, { 188, 1, CONV_SIZE, CONV_SIZE }, { 72, 188, 1, 1 }, { 72, 1, CONV_SIZE, CONV_SIZE }, { 122, 72, 1, 1 }, { 122, 1, CONV_SIZE, CONV_SIZE }, { 106, 122, 1, 1 }, { 106, 1, CONV_SIZE, CONV_SIZE }, { 96, 106, 1, 1 }, { 96, 1, CONV_SIZE, CONV_SIZE }, { 81, 96, 1, 1 }, { 81, 1, CONV_SIZE, CONV_SIZE }, { 75, 81, 1, 1 }, { 75, 1, CONV_SIZE, CONV_SIZE }, { 100, 75, 1, 1 } }; int dshape[1][2]= { { 100, 10} }; */ // FIXED 90% ACCURACY EXPERIMENTS int cshape[27][4] = { { 32, 3, CONV_SIZE, CONV_SIZE }, { 32, 1, CONV_SIZE, CONV_SIZE }, { 43, 32, 1, 1 }, { 43, 1, CONV_SIZE, CONV_SIZE }, { 85, 43, 1, 1 }, { 85, 1, CONV_SIZE, CONV_SIZE }, { 70, 85, 1, 1 }, { 70, 1, CONV_SIZE, CONV_SIZE }, { 150, 70, 1, 1 }, { 150, 1, CONV_SIZE, CONV_SIZE }, { 69, 150, 1, 1 }, { 69, 1, CONV_SIZE, CONV_SIZE }, { 188, 69, 1, 1 }, { 188, 1, CONV_SIZE, CONV_SIZE }, { 72, 188, 1, 1 }, { 72, 1, CONV_SIZE, CONV_SIZE }, { 122, 72, 1, 1 }, { 122, 1, CONV_SIZE, CONV_SIZE }, { 106, 122, 
1, 1 }, { 106, 1, CONV_SIZE, CONV_SIZE }, { 96, 106, 1, 1 }, { 96, 1, CONV_SIZE, CONV_SIZE }, { 81, 96, 1, 1 }, { 81, 1, CONV_SIZE, CONV_SIZE }, { 75, 81, 1, 1 }, { 75, 1, CONV_SIZE, CONV_SIZE }, { 100, 75, 1, 1 } }; int dshape[1][2]= { { 100, 10} }; #else // PLAIN int cshape[27][4] = { { 32, 3, CONV_SIZE, CONV_SIZE }, { 32, 1, CONV_SIZE, CONV_SIZE }, { 64, 32, 1, 1 }, { 64, 1, CONV_SIZE, CONV_SIZE }, { 128, 64, 1, 1 }, { 128, 1, CONV_SIZE, CONV_SIZE }, { 128, 128, 1, 1 }, { 128, 1, CONV_SIZE, CONV_SIZE }, { 256, 128, 1, 1 }, { 256, 1, CONV_SIZE, CONV_SIZE }, { 256, 256, 1, 1 }, { 256, 1, CONV_SIZE, CONV_SIZE }, { 512, 256, 1, 1 }, { 512, 1, CONV_SIZE, CONV_SIZE }, { 512, 512, 1, 1 }, { 512, 1, CONV_SIZE, CONV_SIZE }, { 512, 512, 1, 1 }, { 512, 1, CONV_SIZE, CONV_SIZE }, { 512, 512, 1, 1 }, { 512, 1, CONV_SIZE, CONV_SIZE }, { 512, 512, 1, 1 }, { 512, 1, CONV_SIZE, CONV_SIZE }, { 512, 512, 1, 1 }, { 512, 1, CONV_SIZE, CONV_SIZE }, { 1024, 512, 1, 1 }, { 1024, 1, CONV_SIZE, CONV_SIZE }, { 1024, 1024, 1, 1 } }; int dshape[1][2]= { { 1024, 10} }; #endif // FISHER_PRUNING /****************************************************************************************************************************/ void reset_mem_block(float ***mem) { int i, j, k; for (i = 0; i < mem_block_shape[0]; i++) { for (j = 0; j < mem_block_shape[1]; j++) { for (k = 0; k < mem_block_shape[2]; k++) { mem[i][j][k] = 0.0; } } } } /****************************************************************************************************************************/ void reset_mem_block_dense(float *mem) { int i; for (i = 0; i < mem_block_dense_shape; i++) { mem[i] = 0.0; } } /****************************************************************************************************************************/ void init_memory() { int i, j, k, l; int max_channels = 1024; int max_im_size = 32; block1 = malloc(max_channels * sizeof(float**)); block2 = malloc(max_channels * sizeof(float**)); // allocate block memory for(i = 
0; i < max_channels; i++) { block1[i] = malloc(max_im_size * sizeof(float*)); block2[i] = malloc(max_im_size * sizeof(float*)); for(j = 0; j < max_im_size; j++) { block1[i][j] = malloc(max_im_size * sizeof(float)); block2[i][j] = malloc(max_im_size * sizeof(float)); } } #if SPARSE_CONVOLUTIONS wc_sparse = (csr_t****) malloc(CONV_LEVELS * sizeof(csr_t***)); for (l = 0; l < CONV_LEVELS; l++) { wc_sparse[l] = (csr_t***) malloc(cshape[l][0] * sizeof(csr_t**)); for (i = 0; i < cshape[l][0]; i++) { wc_sparse[l][i] = (csr_t**) malloc(cshape[l][1] * sizeof(csr_t*)); } } // wc memory allocated below will be freed in read_weights if SPARSE_CONVOLUTIONS #endif // SPARSE_CONVOLUTIONS wc = malloc(CONV_LEVELS * sizeof(float****)); // allocate kernel memory for(l = 0; l < CONV_LEVELS; l++) { wc[l] = malloc(cshape[l][0] * sizeof(float***)); for(i = 0; i < cshape[l][0]; i++) { wc[l][i] = malloc(cshape[l][1] * sizeof(float**)); for(j = 0; j < cshape[l][1]; j++) { wc[l][i][j] = malloc(cshape[l][2] * sizeof(float*)); for(k = 0; k < cshape[l][2]; k++) { wc[l][i][j][k] = malloc(cshape[l][3]* sizeof(float)); } } } } // allocate batchnorm memory batchnorm_weights = malloc(27 * sizeof(float*)); batchnorm_biases = malloc(27 * sizeof(float*)); batchnorm_means = malloc(27 * sizeof(float*)); batchnorm_vars = malloc(27 * sizeof(float*)); for (l = 0; l < CONV_LEVELS; l++) { batchnorm_weights[l] = malloc(cshape[l][0] * sizeof(float)); batchnorm_biases[l] = malloc(cshape[l][0] * sizeof(float)); batchnorm_means[l] = malloc(cshape[l][0] * sizeof(float)); batchnorm_vars[l] = malloc(cshape[l][0] * sizeof(float)); } wd = malloc(1 * sizeof(float**)); bd = malloc(1 * sizeof(float*)); for (l = 0; l < 1; l++) { wd[l] = malloc(dshape[l][0] * sizeof(float*)); for (i = 0; i < dshape[l][0]; i++) { wd[l][i] = malloc(dshape[l][1] * sizeof(float)); } bd[l] = malloc(dshape[l][1] * sizeof(float)); } // allocate dense memory mem_block1_dense = calloc(mem_block_dense_shape, sizeof(float)); mem_block2_dense = 
calloc(mem_block_dense_shape, sizeof(float)); } /****************************************************************************************************************************/ void free_memory() { int i, j, k, l; // Free convolution weights for (l = 0; l < CONV_LEVELS; l++) { #if SPARSE_CONVOLUTIONS for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { free(wc_sparse[l][i][j]); } free(wc_sparse[l][i]); } free(wc_sparse[l]); #else for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { free(wc[l][i][j][k]); } free(wc[l][i][j]); } free(wc[l][i]); } free(wc[l]); #endif } // free(wc); // free(bc); #if SPARSE_CONVOLUTIONS free(wc_sparse); #else free(wc); #endif // SPARSE_CONVOLUTIONS // Free dense weights for (l = 0; l < 1; l++) { for (i = 0; i < dshape[l][0]; i++) { free(wd[l][i]); } free(wd[l]); free(bd[l]); } free(wd); free(bd); // Free memblocks for (i = 0; i < mem_block_shape[0]; i++) { for (j = 0; j < mem_block_shape[1]; j++) { free(block1[i][j]); free(block2[i][j]); } free(block1[i]); free(block2[i]); } free(block1); free(block2); free(mem_block1_dense); free(mem_block2_dense); } /****************************************************************************************************************************/ void read_weights(char *in_file, int lvls) { float dval; int i, j, k, l, m, z; FILE *iin; int total_lvls_read = 0; // printf("\nin_file es: %s\n\n", in_file); iin = fopen64(in_file, "r"); if (iin == NULL) { printf("Weights file %s absent\n", in_file); exit(1); } // Reading convolution weights (store them flipped from begining) // no biases for (l = 0; l < CONV_LEVELS; l++) { printf("Read conv block %d weights\n", l); for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { for (m = 0; m < cshape[l][3]; m++) { fscanf(iin, "%f", &dval); wc[l][i][j][k][m] = dval; } } } } total_lvls_read += 1; } for (z = 0; z < CONV_LEVELS; z++) { // 
batchnorm weights and biases printf("Read batchnorm block %d weights\n", z); for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); batchnorm_weights[z][i] = dval; } for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); //printf("bias %i : %f \n", i, dval); batchnorm_biases[z][i] = dval; } for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); //printf("bias %i : %f \n", i, dval); batchnorm_means[z][i] = dval; } for (i = 0; i < cshape[z][0]; i++) { fscanf(iin, "%f", &dval); //printf("bias %i : %f \n", i, dval); batchnorm_vars[z][i] = dval; } } if (total_lvls_read >= lvls && lvls != -1) return; // Reading dense weights int num_dense_layers = 1; for (z = 0; z < num_dense_layers; z++) { printf("Read dense block %d weights\n", z); for (i = 0; i < dshape[z][0]; i++) { for (j = 0; j < dshape[z][1]; j++) { fscanf(iin, "%f", &dval); //printf("weight: %i : %f \n", i, dval); wd[z][i][j] = dval; } } for (i = 0; i < dshape[z][1]; i++) { fscanf(iin, "%f", &dval); //printf("bias %i : %f \n", i, dval); bd[z][i] = dval; } } fclose(iin); /////////////**************** SPARSE ************///////////////////////////// #if SPARSE_CONVOLUTIONS // convert to sparse format for (l = 0; l < CONV_LEVELS; l++) for (i = 0; i < cshape[l][0]; i++) for (j = 0; j < cshape[l][1]; j++) { //printf("going for %d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j, cshape[l][1]); csr_t* a = dense2csr2(cshape[l][2], cshape[l][3], wc[l][i][j]); //print_csr(a); wc_sparse[l][i][j] = a; //printf("done..%d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j, cshape[l][1]); } // Free convolution weights #if FIRST_CONV_SPARSE == 0 l = 0; // allocate new memory for first conv and copy from wc float *****wc_first_conv = (float*****) malloc(1 * sizeof(float****)); wc_first_conv[l] = (float****) malloc(cshape[l][0] * sizeof(float***)); int k1, k2; for (i = 0; i < cshape[l][0]; i++) { wc_first_conv[l][i] = (float***) malloc(cshape[l][1] * sizeof(float**)); for (j = 0; j < cshape[l][1]; j++) { 
wc_first_conv[l][i][j] = (float**) malloc(cshape[l][2] * sizeof(float*)); for (k1 = 0; k1 < cshape[l][2]; k1++) { wc_first_conv[l][i][j][k1] = (float*) malloc(cshape[l][3] * sizeof(float)); for (k2 = 0; k2 < cshape[l][3]; k2++) wc_first_conv[l][i][j][k1][k2] = wc[l][i][j][k1][k2]; } } } #endif // FIRST_CONV_SPARSE == 0 // free up all dense conv layer representation for (l = 0; l < CONV_LEVELS; l++) { for (i = 0; i < cshape[l][0]; i++) { for (j = 0; j < cshape[l][1]; j++) { for (k = 0; k < cshape[l][2]; k++) { free(wc[l][i][j][k]); } free(wc[l][i][j]); } free(wc[l][i]); } free(wc[l]); } free(wc); #if FIRST_CONV_SPARSE == 0 // replace old wc pointer with the data for only first conv layer created above wc = wc_first_conv; #endif // FIRST_CONV_SPARSE == 0 #endif // SPARSE_CONVOLUTIONS } /****************************************************************************************************************************/ void read_image(char *in_file) { int i, j, l; FILE *iin; float dval; iin = fopen(in_file, "r"); if (iin == NULL) { printf("Image file %s absent\n", in_file); exit(1); } /* Reading image */ for (i = 0; i < SIZE; i++) { for (j = 0; j < SIZE; j++) { for (l = 0; l < 3; l++) { fscanf(iin, "%f", &dval); block1[l][i][j] = dval; } } } } /****************************************************************************************************************************/ void convolution_3_x_3(float **matrix, float **kernel, float **out, int size, int stride) { int i, j; float sum; float zeropad[size+2][size+2]; memset(zeropad, 0, ((size+2)*(size+2)*sizeof(float))); // jack for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { zeropad[i + 1][j + 1] = matrix[i][j]; } } for (i = 0; i < size; i=i+stride) { for (j = 0; j < size; j=j+stride) { sum = zeropad[i ][j ] * kernel[0][0] + zeropad[i ][j + 1] * kernel[0][1] + zeropad[i ][j + 2] * kernel[0][2] + zeropad[i + 1][j ] * kernel[1][0] + zeropad[i + 1][j + 1] * kernel[1][1] + zeropad[i + 1][j + 2] * kernel[1][2] + zeropad[i + 
2][j ] * kernel[2][0] + zeropad[i + 2][j + 1] * kernel[2][1] + zeropad[i + 2][j + 2] * kernel[2][2]; out[i][j] += sum; } } } /****************************************************************************************************************************/ void convolution_3_x_3_sparse(float **matrix, csr_t* kernel, float **out, int size, int stride) { // printf("sparse\n"); int i, j; // float zeropad[SIZE + 2][SIZE + 2] = { 0.0 }; float zeropad[size+2][size+2]; memset(zeropad, 0, ((size+2)*(size+2)*sizeof(float))); // jack for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { zeropad[i + 1][j + 1] = matrix[i][j]; } } // float** zeropad = (float**) malloc((size + 2) * sizeof(float*)); //[size+2][size+2]; // for (i = 0; i < (size + 2); ++i) // zeropad[i] = (float*) malloc ((size + 2) * sizeof(float)); // //memset(zeropad, 0, ((size+2)*(size+2)*sizeof(float))); // // padding with zeros // for (i = 0; i < size + 2; ++i) { // zeropad[i][0] = 0; // zeropad[i][size + 1] = 0; // } // for (i = 1; i < size + 1; ++i) { // zeropad[0][i] = 0; // zeropad[size + 1][i] = 0; // } // // copying input value // for (i = 0; i < size; ++i) { // for (j = 0; j < size; ++j) { // zeropad[i + 1][j + 1] = matrix[i][j]; // } // } // // convolution // for (i = 0; i < size; ++i) { // for (j = 0; j < size; ++j) { // out[i][j] += s_csr_conv(kernel, zeropad, i, j); // } // } // for (i = 0; i < (size + 2); ++i) // free(zeropad[i]); // free(zeropad); int k,l; float sum; // convolution for (i = 0; i < size; i+=stride) { for (j = 0; j < size; j+=stride) { //out[i][j] += s_csr_conv(kernel, zeropad, i, j); sum = 0.f; for (k = 0; k < kernel->nrows; ++k) { // for every nonzero element in this row for (l = kernel->rowptr[k]; l < kernel->rowptr[k + 1]; ++l) { // Scale the corresponding row of B with the nonzero value of A float value = kernel->values[l]; int col = kernel->colind[l]; sum += value * zeropad[i + k][j + col]; } } out[i][j] += sum; } } } 
/****************************************************************************************************************************/ void pointwise_convolution(float ****point_kernel, float ***block2, float ***block1, int input_channels, int output_channels, int image_size) { int i, j, k, l; float sum; #pragma omp parallel for private(i,j,k,l) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS) for (i = 0; i < output_channels; i++) { for(j = 0; j < image_size; j++) { for(k = 0; k < image_size; k++) { sum = 0.; for(l = 0; l < input_channels; l++) { sum += block2[l][j][k] * point_kernel[i][l][0][0]; // 0 because they are always 1x1 filters } block1[i][j][k] = sum; } } } } /****************************************************************************************************************************/ void pointwise_convolution_sparse(float **matrix, csr_t* kernel, float **out, int size) { // printf("sparse\n"); int i, j; // float zeropad[SIZE + 2][SIZE + 2] = { 0.0 }; float zeropad[size+2][size+2]; memset(zeropad, 0, ((size+2)*(size+2)*sizeof(float))); // jack for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { zeropad[i + 1][j + 1] = matrix[i][j]; } } // float** zeropad = (float**) malloc((size + 2) * sizeof(float*)); //[size+2][size+2]; // for (i = 0; i < (size + 2); ++i) // zeropad[i] = (float*) malloc ((size + 2) * sizeof(float)); // //memset(zeropad, 0, ((size+2)*(size+2)*sizeof(float))); // // padding with zeros // for (i = 0; i < size + 2; ++i) { // zeropad[i][0] = 0; // zeropad[i][size + 1] = 0; // } // for (i = 1; i < size + 1; ++i) { // zeropad[0][i] = 0; // zeropad[size + 1][i] = 0; // } // // copying input value // for (i = 0; i < size; ++i) { // for (j = 0; j < size; ++j) { // zeropad[i + 1][j + 1] = matrix[i][j]; // } // } // // convolution // for (i = 0; i < size; ++i) { // for (j = 0; j < size; ++j) { // out[i][j] += s_csr_conv(kernel, zeropad, i, j); // } // } // for (i = 0; i < (size + 2); ++i) // free(zeropad[i]); // free(zeropad); int k,l; float sum; 
// convolution for (i = 0; i < size; ++i) { for (j = 0; j < size; ++j) { //out[i][j] += s_csr_conv(kernel, zeropad, i, j); sum = 0.f; for (k = 0; k < kernel->nrows; ++k) { // for every nonzero element in this row for (l = kernel->rowptr[k]; l < kernel->rowptr[k + 1]; ++l) { // Scale the corresponding row of B with the nonzero value of A float value = kernel->values[l]; int col = kernel->colind[l]; sum += value * zeropad[i + k][j + col]; } } out[i][j] += sum; } } } /****************************************************************************************************************************/ void batchnorm_and_relu(float ***in, float ***out, float *weights, float *bias, float *mean, float *var, int num_channels, int image_size) { int channel, i, j; // ((x - mean) * invstd) * w + b #pragma omp parallel for private(channel,i,j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS) for(channel = 0; channel < num_channels; channel++) { float invstd = 1. / sqrt(var[channel] + 0.000001); for(i = 0; i < image_size; i++) { for(j = 0; j < image_size; j++) { out[channel][i][j] = (weights[channel] * invstd ) * in[channel][i][j] + (bias[channel] - ((weights[channel] * mean[channel]) * invstd)); //out[channel][i][j] = ((in[channel][i][j] - mean[channel]) * invstd) * weights[channel] + bias[channel]; if (out[channel][i][j] < 0.f) out[channel][i][j] = 0.f; } } } } /****************************************************************************************************************************/ void depthwise_convolution(float ***block1, float ***block2, float ****depth_kernel, float ****point_kernel, int level) { int i, j; int input_channels = cshape[level][0]; int output_channels = cshape[level+1][0]; //printf("level %i: %i ==> %i\n", level, input_channels, output_channels); #pragma omp parallel for private(i) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS) for(i = 0; i < input_channels; i++) { #if SPARSE_CONVOLUTIONS convolution_3_x_3_sparse(block1[i], wc_sparse[level][i][0], 
block2[i], im_sizes[level], strides[level]); #else convolution_3_x_3(block1[i], depth_kernel[i][0], block2[i], im_sizes[level], strides[level]); #endif } batchnorm_and_relu(block2, block1, batchnorm_weights[level], batchnorm_biases[level], batchnorm_means[level], batchnorm_vars[level], input_channels, im_sizes[level+1]); reset_mem_block(block2); level++; // now do linear combination of the elements in output and write them back into the first memory block #if SPARSE_CONVOLUTIONS #pragma omp parallel for private(i,j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS) for(i = 0; i < output_channels; i++) { for(j = 0; j < input_channels; j++) { pointwise_convolution_sparse(block2[j], wc_sparse[level][i][j], block1[j], im_sizes[level] ); } } #else pointwise_convolution(point_kernel, block1, block2, input_channels, output_channels, im_sizes[level]); #endif batchnorm_and_relu(block2, block1, batchnorm_weights[level], batchnorm_biases[level], batchnorm_means[level], batchnorm_vars[level], output_channels, im_sizes[level+1]); reset_mem_block(block2); } /****************************************************************************************************************************/ void add_bias_and_relu_flatten(float *out, float *bs, int size, int relu) { int i; for (i = 0; i < size; i++) { out[i] += bs[i]; // printf("%f\n", out[i]); if (relu == 1) { if (out[i] < 0) out[i] = 0.f; } } } /****************************************************************************************************************************/ void flatten(float ***in, float *out, int sh0, int sh1, int sh2) { int i, j, k, total = 0; for (i = 0; i < sh0; i++) { for (j = 0; j < sh1; j++) { for (k = 0; k < sh2; k++) { out[total] = in[i][j][k]; total += 1; } } } } /****************************************************************************************************************************/ void dense(float *in, float **weights, float *out, int sh_in, int sh_out) { int i, j; #pragma omp parallel for private(i, j) 
schedule(dynamic,1) num_threads(NUMBER_OF_THREADS) for (i = 0; i < sh_out; i++) { float sum = 0.0; for (j = 0; j < sh_in; j++) { sum += in[j] * weights[j][i]; } out[i] = sum; } } /****************************************************************************************************************************/ void write_out_block(int layer, float ***block) { int layer_name = layer;// * 2 - 1; char filename[16]; sprintf(filename, "outputs/output%d", layer_name); FILE *f = fopen(filename, "w"); if (f == NULL){ printf("Error opening file!\n"); exit(1); } for(int i = 0; i < 32; i++) { for(int j = 0; j < mem_block_shape[1]; j++) { for(int k = 0; k < mem_block_shape[2]; k++) { fprintf(f, "%f \n", block[i][j][k]); } } } fclose(f); } /****************************************************************************************************************************/ void write_out_layer(int layer) { int layer_name = layer;// * 2 - 1; char filename[7]; sprintf(filename, "layer%d", layer_name); FILE *f = fopen(filename, "w"); int depth = 1; if (f == NULL){ printf("Error opening file!\n"); exit(1); } for(int o = 0; o < cshape[layer][0]; o++) { for(int i = 0; i < cshape[layer][1]; i++) { for(int k_h = 0; k_h < cshape[layer][2]; k_h++) { for(int k_w = 0; k_w < cshape[layer][3]; k_w++) { fprintf(f, "%f ", wc[layer][o][i][k_h][k_w]); } } fprintf(f, "\n"); } } fclose(f); layer_name = layer + 1; char filename2[7]; sprintf(filename2, "layer%d", layer_name); // get batchnorms FILE *f2 = fopen(filename2, "w"); if (f2 == NULL){ printf("Error opening file!\n"); exit(1); } for(int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_weights[layer][i]); } fprintf(f2, "\n\n\n"); for(int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_biases[layer][i]); } fprintf(f2, "\n\n\n"); for(int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", batchnorm_means[layer][i]); } fprintf(f2, "\n\n\n"); for(int i = 0; i < cshape[layer][0]; i++) { fprintf(f2, "%f \n", 
batchnorm_vars[layer][i]); } fclose(f); } /****************************************************************************************************************************/ void output_predictions(FILE *out, int only_convolution, int size, int cur_size) { int i; int c=0; if (only_convolution == 1) { //for (i = 0; i < 512*7*7; i++) { for (i = 0; i < size * cur_size * cur_size; i++) { fprintf(out, "%g\n", mem_block1_dense[i]); } } else { double maximum=-1; // dshape[0][1] ==> 10 for (i = 0; i < dshape[0][1]; i++) { fprintf(out, "%g\n", mem_block2_dense[i]); if(mem_block1_dense[i]>maximum){ maximum=mem_block2_dense[i]; c=i+1; } } fprintf(out, "\n"); printf("This image depicts class: %d\n",c); } } /****************************************************************************************************************************/ void get_mobilenet_predict() { int level = 0; int i, j; // normal convolution #pragma omp parallel for private(i, j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS) for (i = 0; i < cshape[level][0]; i++) { for (j = 0; j < cshape[level][1]; j++) { #if FIRST_CONV_SPARSE convolution_3_x_3_sparse(block1[j], wc_sparse[level][i][j], block2[i], im_sizes[level], 1); #else convolution_3_x_3(block1[j], wc[level][i][j], block2[i], im_sizes[level], 1); #endif } } batchnorm_and_relu(block2, block1, batchnorm_weights[level], batchnorm_biases[level], batchnorm_means[level], batchnorm_vars[level], 32, 32); reset_mem_block(block2); // depthwise convolutions for(level = 1; level < (CONV_LEVELS - 1); level=level+2) { depthwise_convolution(block1, block2, wc[level], wc[level+1], (level)); } // flatten flatten(block1, mem_block1_dense, cshape[level][0], im_sizes[level], im_sizes[level]); // dense level = 0; dense(mem_block1_dense, wd[level], mem_block2_dense, dshape[level][0], dshape[level][1]); add_bias_and_relu_flatten(mem_block2_dense, bd[level], dshape[level][1], 0); reset_mem_block_dense(mem_block1_dense); return; } 
/****************************************************************************************************************************/ char *trimwhitespace(char *str) { char *end; // Trim leading space while (isspace((unsigned char)*str)) str++; if (*str == 0) // All spaces? return str; // Trim trailing space end = str + strlen(str) - 1; while (end > str && isspace((unsigned char)*end)) end--; // Write new null terminator *(end + 1) = 0; return str; } /****************************************************************************************************************************/ int main(int argc, char *argv[]) { FILE *file_list, *results; char buf[1024]; struct timeval tStart, tEnd; double deltaTime; char *weights_file; char *image_list_file; char *output_file; int lvls = -1; int only_convolution = 0; //----------------------------------------------------------------------- printf("Using %d threads\n", NUMBER_OF_THREADS); if (argc != 4 && argc != 5) { printf("Usage: <program.exe> <weights file> <images list file> <output file> <only_convolution [optional]>\n"); return 0; } weights_file = argv[1]; //printf("%s\n", weights_file); image_list_file = argv[2]; output_file = argv[3]; if (argc == 5) { lvls = 20; only_convolution = 1; } //----------------------------------------------------------------------- init_memory(); file_list = fopen(image_list_file, "r"); if (file_list == NULL) { printf("Check file list location: %s\n", image_list_file); return 1; } results = fopen(output_file, "w"); if (results == NULL) { printf("Couldn't open file for writing: %s\n", output_file); return 1; } gettimeofday(&tStart, NULL); read_weights(weights_file, lvls); gettimeofday(&tEnd, NULL); deltaTime = get_seconds(tStart, tEnd); printf("Reading weights: %.3lf sec\n", deltaTime); while (!feof(file_list)) { fgets(buf, 1024, file_list); if (strlen(buf) == 0) { break; } // printf("%d\n", strlen(buf)); read_image(trimwhitespace(buf)); gettimeofday(&tStart, NULL); get_mobilenet_predict(results, 
only_convolution); gettimeofday(&tEnd, NULL); deltaTime = get_seconds(tStart, tEnd); printf("Infer image %s: %.3lf sec\n", buf, deltaTime); output_predictions(results, only_convolution, 1024, 1); } //free_memory(); fclose(file_list); return 0; }
zipmonster_fmt_plug.c
/* This format is reverse engineered from InsidePro Hash Manager! * * This software is Copyright (c) 2016, Dhiru Kholia <dhiru.kholia at gmail.com>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, * are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_zipmonster; #elif FMT_REGISTERS_H john_register_one(&fmt_zipmonster); #else #include "arch.h" #include "sha.h" #include "md5.h" #include <string.h> #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "simd-intrinsics.h" //#undef SIMD_COEF_32 #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 1 #endif #endif #include "memdbg.h" #define FORMAT_LABEL "ZipMonster" #define FORMAT_NAME "MD5(ZipMonster)" #define ALGORITHM_NAME "MD5-" MD5_ALGORITHM_NAME " x 50000" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 #define BINARY_SIZE 16 #define SALT_SIZE 0 #define BINARY_ALIGN sizeof(ARCH_WORD_32) #define SALT_ALIGN sizeof(int) #define MIN_KEYS_PER_CRYPT 1 #ifdef SIMD_COEF_32 #define MAX_KEYS_PER_CRYPT (SIMD_PARA_MD5*SIMD_COEF_32) #else #define MAX_KEYS_PER_CRYPT 1 #endif #define FORMAT_TAG "$zipmonster$" #define TAG_LENGTH (sizeof(FORMAT_TAG) - 1) static struct fmt_tests zipmonster_tests[] = { {"$zipmonster$e0f68d6f40c5f157c169e9ca0a6f09fe", "!"}, {"4dac447f100ee85327db2b47e295e50d", "1"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int *saved_len; static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)]; static unsigned short itoa16u_w[256]; #ifdef SIMD_COEF_32 #define GETPOS(i,index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*64*SIMD_COEF_32 ) #endif static void init(struct fmt_main *self) { int i; char buf[3]; #ifdef _OPENMP static int omp_t = 1; omp_t = omp_get_max_threads(); 
self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	/* Per-candidate buffers, sized for the (possibly OMP-scaled) batch. */
	saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
	saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out));
	/* Build the byte -> two UPPERCASE hex digits table; both digit
	   characters are packed into one 16-bit slot (see "%X%X" below), so a
	   single 16-bit store emits two hex characters at once. */
	for (i = 0; i < 256; ++i) {
		sprintf(buf, "%X%X", i>>4, i&0xF);
		memcpy(&(itoa16u_w[i]), buf, 2);
	}
}

/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_len);
	MEM_FREE(saved_key);
}

/* A ciphertext is valid when, after an optional FORMAT_TAG prefix, it is
 * exactly BINARY_SIZE*2 hex characters (ishexlc() — a hex check, lowercase
 * variant per its name; confirm against the shared helper's definition). */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p = ciphertext;

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		p = ciphertext + TAG_LENGTH;
	if(!p)
		return 0;
	if (!ishexlc(p))
		return 0;
	if (strlen(p) != BINARY_SIZE * 2)
		return 0;
	return 1;
}

/* Decode the hex ciphertext (tag stripped) into raw bytes.
 * NOTE(review): the buffer is static, so the returned pointer is only valid
 * until the next call — standard for JtR get_binary() implementations. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy; /* forces word alignment of c[] */
	} buf;
	unsigned char *out = buf.c;
	char *p = ciphertext;
	int i;

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		p = ciphertext + TAG_LENGTH;
	for (i = 0; i < BINARY_SIZE && *p; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

/* Partial-hash functions: successively wider masks of the first 32 bits. */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }

#ifndef SIMD_COEF_32
/* Scalar path: expand BINARY_SIZE raw bytes into uppercase hex, two
 * characters per 16-bit store via the packed itoa16u_w table. */
static inline void hex_encode_uppercase(unsigned char *str, unsigned char *_out)
{
	int i;
	unsigned short *out = (unsigned short*)_out;

	for (i = 0; i < BINARY_SIZE; ++i) {
		out[i] = itoa16u_w[str[i]];
	}
}
#endif

/* Core loop: digest = md5(key), then 49,999 further rounds of
 * digest = md5(UPPERCASE_HEX(digest)) — 50,000 MD5 computations per
 * candidate in total. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
	int inc = 1;

#ifdef SIMD_COEF_32
	inc = SIMD_COEF_32*SIMD_PARA_MD5;
#endif

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += inc) {
		unsigned char buffer[BINARY_SIZE];
		MD5_CTX ctx;
		int n = 49999; /* iterations after the initial md5(key) */
#ifdef SIMD_COEF_32
		int j, k;
		uint32_t *p, t;
		/* input/output blocks over-allocated so they can be SIMD-aligned */
		uint8_t ib[64 * SIMD_COEF_32 * SIMD_PARA_MD5 + MEM_ALIGN_SIMD];
		uint8_t ob[16 * SIMD_COEF_32 * SIMD_PARA_MD5 + MEM_ALIGN_SIMD];
		uint8_t *md5 = mem_align(ib, MEM_ALIGN_SIMD);
		uint32_t *crypt_buf = mem_align(ob, MEM_ALIGN_SIMD);

		memset(md5, 0, 64 * SIMD_COEF_32 * SIMD_PARA_MD5);
		/* Seed each SIMD lane with UPPERCASE_HEX(md5(key)). */
		for (j = 0; j < SIMD_COEF_32*SIMD_PARA_MD5; ++j) {
			uint16_t *op = (uint16_t*)&md5[GETPOS(0, j)];

			MD5_Init(&ctx);
			MD5_Update(&ctx, saved_key[index+j], strlen(saved_key[index+j]));
			MD5_Final(buffer, &ctx);
			/* two digest bytes (four hex chars) per iteration: k is
			   bumped both here and by the loop increment */
			for (k = 0; k < 16; ++k) {
				op[0] = itoa16u_w[buffer[k++]];
				op[1] = itoa16u_w[buffer[k]];
				op += ((SIMD_COEF_32) << 1);
			}
			md5[GETPOS(32,j)] = 0x80; /* MD5 padding byte after 32 chars */
			md5[GETPOS(57,j)] = 1;    /* length field: 0x100 bits = 32 bytes */
		}
#else
		unsigned char hex_buffer[BINARY_SIZE * 2];

		MD5_Init(&ctx);
		MD5_Update(&ctx, saved_key[index], strlen(saved_key[index]));
		MD5_Final(buffer, &ctx);
		hex_encode_uppercase(buffer, hex_buffer);
#endif
		do {
#ifdef SIMD_COEF_32
			SIMDmd5body(md5, crypt_buf, NULL, SSEi_MIXED_IN);
			// upper case hex encode into the next input buffer.
			for (j = 0; j < SIMD_PARA_MD5*SIMD_COEF_32; ++j) {
				int i;
				uint16_t *op = (uint16_t*)&md5[GETPOS(0, j)];

				/* lane j's first output word in the interleaved buffer */
				p = &crypt_buf[(j&(SIMD_COEF_32-1))+(4*SIMD_COEF_32*(j/SIMD_COEF_32))];
				for (i = 0; i < 4; ++i) {
					t = *p;
					p += SIMD_COEF_32;
					op[0] = itoa16u_w[t&0xFF];
					op[1] = itoa16u_w[(t>>8)&0xFF];
					t >>= 16;
					op += ((SIMD_COEF_32) << 1);
					op[0] = itoa16u_w[t&0xFF];
					op[1] = itoa16u_w[(t>>8)&0xFF];
					op += ((SIMD_COEF_32) << 1);
				}
			}
#else
			MD5_Init(&ctx);
			MD5_Update(&ctx, hex_buffer, BINARY_SIZE * 2);
			MD5_Final(buffer, &ctx);
			hex_encode_uppercase(buffer, hex_buffer);
#endif
			--n;
		} while (n);
#ifdef SIMD_COEF_32
		/* de-interleave the final digests into crypt_out */
		p = crypt_buf;
		for (j = 0; j < SIMD_PARA_MD5*SIMD_COEF_32; j+=SIMD_COEF_32) {
			for (k = 0; k < SIMD_COEF_32*4; ++k) {
				uint32_t J = j+(k&(SIMD_COEF_32-1)), K = (k/SIMD_COEF_32);
				crypt_out[index+J][K] = *p++;
			}
		}
#else
		memcpy((unsigned char*)crypt_out[index], buffer, BINARY_SIZE);
#endif
	}
	return count;
}

/* Quick screen: does any computed hash match on the first ARCH_SIZE bytes? */
static int cmp_all(void *binary, int count)
{
	int index = 0;

#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

/* Full-width comparison for one candidate. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* The full binary was already compared in cmp_one(); nothing more to do. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

static void zipmonster_set_key(char *key, int index)
{
	saved_len[index] = strnzcpyn(saved_key[index], key, sizeof(saved_key[index]));
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* John the Ripper format descriptor for this plugin. */
struct fmt_main fmt_zipmonster = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL },
		{ FORMAT_TAG },
#endif
		zipmonster_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		fmt_default_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		zipmonster_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
GB_unop__identity_fc32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fc32_fp32)
// op(A') function:  GB (_unop_tran__identity_fc32_fp32)

// C type:   GxB_FC32_t
// A type:   float
// cast:     GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop:  cij = aij

// A (input) entry type
#define GB_ATYPE \
    float

// C (output) entry type
#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: the cast below does all the work)
#define GB_OP(z, x) \
    z = x ;

// casting: real float -> single-precision complex with zero imaginary part
#define GB_CAST(z, aij) \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)                             \
{                                                     \
    /* aij = Ax [pA] */                               \
    float aij = Ax [pA] ;                             \
    /* Cx [pC] = op (cast (aij)) */                   \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;    \
    Cx [pC] = z ;                                     \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_fc32_fp32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_fc32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template expands using the GB_* macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
red_black_constantcoef_gs.c
/*BHEADER**********************************************************************
 * Copyright (c) 2008,  Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE.  See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision: 2.14 $
 ***********************************************************************EHEADER*/

#include "_hypre_struct_ls.h"
#include "red_black_gs.h"

#ifndef hypre_abs
#define hypre_abs(a)  (((a)>0) ? (a) : -(a))
#endif

/*--------------------------------------------------------------------------
 * Red/black Gauss-Seidel relaxation for structured matrices, specialized
 * for the constant-coefficient case.  Each call performs max_iter sweeps;
 * one sweep relaxes the "red" points, the next the "black" points (iter is
 * counted per half-sweep, hence the 2*max_iter loop bound and iter/2 at the
 * end).  Supports 3-, 5-, and 7-point stencils only.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_RedBlackConstantCoefGS( void               *relax_vdata,
                              hypre_StructMatrix *A,
                              hypre_StructVector *b,
                              hypre_StructVector *x )
{
   hypre_RedBlackGSData  *relax_data = relax_vdata;

   HYPRE_Int              max_iter    = (relax_data -> max_iter);
   HYPRE_Int              zero_guess  = (relax_data -> zero_guess);
   HYPRE_Int              rb_start    = (relax_data -> rb_start);
   HYPRE_Int              diag_rank   = (relax_data -> diag_rank);
   hypre_ComputePkg      *compute_pkg = (relax_data -> compute_pkg);

   hypre_CommHandle      *comm_handle;

   hypre_BoxArrayArray   *compute_box_aa;
   hypre_BoxArray        *compute_box_a;
   hypre_Box             *compute_box;

   hypre_Box             *A_dbox;
   hypre_Box             *b_dbox;
   hypre_Box             *x_dbox;

   /* linear-index bases and x/y strides for each data box */
   HYPRE_Int              Ai, Astart, Ani, Anj;
   HYPRE_Int              bi, bstart, bni, bnj;
   HYPRE_Int              xi, xstart, xni, xnj;
   HYPRE_Int              xoff0, xoff1, xoff2, xoff3, xoff4, xoff5;

   double                *Ap;
   double                *App;
   double                *bp;
   double                *xp;

   /* constant coefficient */
   HYPRE_Int              constant_coeff= hypre_StructMatrixConstantCoefficient(A);
   double                 App0, App1, App2, App3, App4, App5, AApd;

   hypre_IndexRef         start;
   hypre_Index            loop_size;

   hypre_StructStencil   *stencil;
   hypre_Index           *stencil_shape;
   HYPRE_Int              stencil_size;

   HYPRE_Int              offd[6];
   HYPRE_Int              iter, rb, redblack;
   HYPRE_Int              compute_i, i, j, ii, jj, kk;
   HYPRE_Int              ni, nj, nk;

   /*----------------------------------------------------------
    * Initialize some things and deal with special cases
    *----------------------------------------------------------*/

   hypre_BeginTiming(relax_data -> time_index);

   /* drop old references and keep fresh ones to A, b, x */
   hypre_StructMatrixDestroy(relax_data -> A);
   hypre_StructVectorDestroy(relax_data -> b);
   hypre_StructVectorDestroy(relax_data -> x);
   (relax_data -> A) = hypre_StructMatrixRef(A);
   (relax_data -> x) = hypre_StructVectorRef(x);
   (relax_data -> b) = hypre_StructVectorRef(b);

   (relax_data -> num_iterations) = 0;

   /* if max_iter is zero, return */
   if (max_iter == 0)
   {
      /* if using a zero initial guess, return zero */
      if (zero_guess)
      {
         hypre_StructVectorSetConstantValues(x, 0.0);
      }
      hypre_EndTiming(relax_data -> time_index);
      return hypre_error_flag;
   }
   else
   {
      stencil       = hypre_StructMatrixStencil(A);
      stencil_shape = hypre_StructStencilShape(stencil);
      stencil_size  = hypre_StructStencilSize(stencil);

      /* get off-diag entry ranks ready */
      i = 0;
      for (j = 0; j < stencil_size; j++)
      {
         if (j != diag_rank)
         {
            offd[i] = j;
            i++;
         }
      }
   }
   hypre_StructVectorClearBoundGhostValues(x, 0);

   /*----------------------------------------------------------
    * Do zero_guess iteration
    * (x == 0, so off-diagonal terms vanish: x = b / diag)
    *----------------------------------------------------------*/

   rb = rb_start;
   iter = 0;

   if (zero_guess)
   {
      for (compute_i = 0; compute_i < 2; compute_i++)
      {
         /* pass 0: boxes independent of communication; pass 1: dependent */
         switch(compute_i)
         {
            case 0:
            {
               compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg);
            }
            break;

            case 1:
            {
               compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg);
            }
            break;
         }

         hypre_ForBoxArrayI(i, compute_box_aa)
         {
            compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, i);

            A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), i);
            b_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(b), i);
            x_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);

            Ap = hypre_StructMatrixBoxData(A, i, diag_rank);
            bp = hypre_StructVectorBoxData(b, i);
            xp = hypre_StructVectorBoxData(x, i);

            hypre_ForBoxI(j, compute_box_a)
            {
               compute_box = hypre_BoxArrayBox(compute_box_a, j);

               start = hypre_BoxIMin(compute_box);
               hypre_BoxGetSize(compute_box, loop_size);

               /* Are we relaxing index start or start+(1,0,0)? */
               redblack = hypre_abs(hypre_IndexX(start) +
                                    hypre_IndexY(start) +
                                    hypre_IndexZ(start) + rb) % 2;

               bstart = hypre_BoxIndexRank(b_dbox, start);
               xstart = hypre_BoxIndexRank(x_dbox, start);
               ni = hypre_IndexX(loop_size);
               nj = hypre_IndexY(loop_size);
               nk = hypre_IndexZ(loop_size);
               bni = hypre_BoxSizeX(b_dbox);
               xni = hypre_BoxSizeX(x_dbox);
               bnj = hypre_BoxSizeY(b_dbox);
               xnj = hypre_BoxSizeY(x_dbox);

               if (constant_coeff == 1)
               {
                  /* one diagonal value for the whole box */
                  Ai= hypre_CCBoxIndexRank(A_dbox, start);
                  AApd= 1.0/Ap[Ai];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii,jj,bi,xi,kk) HYPRE_SMP_SCHEDULE
#endif
                  for (kk = 0; kk < nk; kk++)
                  {
                     for (jj = 0; jj < nj; jj++)
                     {
                        /* parity picks the first point of this line; the
                           inner loop then strides by 2 over same-color pts */
                        ii = (kk + jj + redblack) % 2;
                        bi = bstart + kk*bnj*bni + jj*bni + ii;
                        xi = xstart + kk*xnj*xni + jj*xni + ii;
                        for (; ii < ni; ii+=2, bi+=2, xi+=2)
                        {
                           xp[xi] = bp[bi]*AApd;
                        }
                     }
                  }
               }
               else  /* variable coefficient diag */
               {
                  Astart = hypre_BoxIndexRank(A_dbox, start);
                  Ani = hypre_BoxSizeX(A_dbox);
                  Anj = hypre_BoxSizeY(A_dbox);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii,jj,Ai,bi,xi,kk) HYPRE_SMP_SCHEDULE
#endif
                  for (kk = 0; kk < nk; kk++)
                  {
                     for (jj = 0; jj < nj; jj++)
                     {
                        ii = (kk + jj + redblack) % 2;
                        Ai = Astart + kk*Anj*Ani + jj*Ani + ii;
                        bi = bstart + kk*bnj*bni + jj*bni + ii;
                        xi = xstart + kk*xnj*xni + jj*xni + ii;
                        for (; ii < ni; ii+=2, Ai+=2, bi+=2, xi+=2)
                        {
                           xp[xi] = bp[bi] / Ap[Ai];
                        }
                     }
                  }
               }
            }
         }
      }

      rb = (rb + 1) % 2;
      iter++;
   }

   /*----------------------------------------------------------
    * Do regular iterations
    *----------------------------------------------------------*/

   while (iter < 2*max_iter)
   {
      for (compute_i = 0; compute_i < 2; compute_i++)
      {
         switch(compute_i)
         {
            case 0:
            {
               /* start ghost-value exchange, relax interior meanwhile */
               xp = hypre_StructVectorData(x);
               hypre_InitializeIndtComputations(compute_pkg, xp, &comm_handle);
               compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg);
            }
            break;

            case 1:
            {
               /* communication done; relax boundary-dependent boxes */
               hypre_FinalizeIndtComputations(comm_handle);
               compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg);
            }
            break;
         }

         hypre_ForBoxArrayI(i, compute_box_aa)
         {
            compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, i);

            A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), i);
            b_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(b), i);
            x_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);

            Ap = hypre_StructMatrixBoxData(A, i, diag_rank);
            bp = hypre_StructVectorBoxData(b, i);
            xp = hypre_StructVectorBoxData(x, i);

            hypre_ForBoxI(j, compute_box_a)
            {
               compute_box = hypre_BoxArrayBox(compute_box_a, j);

               start = hypre_BoxIMin(compute_box);
               hypre_BoxGetSize(compute_box, loop_size);

               /* Are we relaxing index start or start+(1,0,0)? */
               redblack = hypre_abs(hypre_IndexX(start) +
                                    hypre_IndexY(start) +
                                    hypre_IndexZ(start) + rb) % 2;

               bstart = hypre_BoxIndexRank(b_dbox, start);
               xstart = hypre_BoxIndexRank(x_dbox, start);
               ni = hypre_IndexX(loop_size);
               nj = hypre_IndexY(loop_size);
               nk = hypre_IndexZ(loop_size);
               bni= hypre_BoxSizeX(b_dbox);
               xni= hypre_BoxSizeX(x_dbox);
               bnj= hypre_BoxSizeY(b_dbox);
               xnj= hypre_BoxSizeY(x_dbox);
               Ai = hypre_CCBoxIndexRank(A_dbox, start);

               /* Load the constant off-diagonal coefficients and offsets.
                  Fallthrough is intentional: a 7-pt stencil also needs the
                  5-pt and 3-pt entries, etc. */
               switch(stencil_size)
               {
                  case 7:
                     App = hypre_StructMatrixBoxData(A, i, offd[5]);
                     App5= App[Ai];
                     App = hypre_StructMatrixBoxData(A, i, offd[4]);
                     App4= App[Ai];
                     xoff5 = hypre_BoxOffsetDistance(
                        x_dbox, stencil_shape[offd[5]]);
                     xoff4 = hypre_BoxOffsetDistance(
                        x_dbox, stencil_shape[offd[4]]);
                     /* fall through */

                  case 5:
                     App = hypre_StructMatrixBoxData(A, i, offd[3]);
                     App3= App[Ai];
                     App = hypre_StructMatrixBoxData(A, i, offd[2]);
                     App2= App[Ai];
                     xoff3 = hypre_BoxOffsetDistance(
                        x_dbox, stencil_shape[offd[3]]);
                     xoff2 = hypre_BoxOffsetDistance(
                        x_dbox, stencil_shape[offd[2]]);
                     /* fall through */

                  case 3:
                     App = hypre_StructMatrixBoxData(A, i, offd[1]);
                     App1= App[Ai];
                     App = hypre_StructMatrixBoxData(A, i, offd[0]);
                     App0= App[Ai];
                     xoff1 = hypre_BoxOffsetDistance(
                        x_dbox, stencil_shape[offd[1]]);
                     xoff0 = hypre_BoxOffsetDistance(
                        x_dbox, stencil_shape[offd[0]]);
                     break;
               }

               if (constant_coeff == 1)
               {
                  AApd = 1/Ap[Ai];

                  switch(stencil_size)
                  {
                     case 7:
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii,jj,bi,xi,kk) HYPRE_SMP_SCHEDULE
#endif
                        for (kk = 0; kk < nk; kk++)
                        {
                           for (jj = 0; jj < nj; jj++)
                           {
                              ii = (kk + jj + redblack) % 2;
                              bi = bstart + kk*bnj*bni + jj*bni + ii;
                              xi = xstart + kk*xnj*xni + jj*xni + ii;
                              for (; ii < ni; ii+=2, bi+=2, xi+=2)
                              {
                                 xp[xi] =
                                    (bp[bi] -
                                     App0*xp[xi + xoff0] -
                                     App1*xp[xi + xoff1] -
                                     App2*xp[xi + xoff2] -
                                     App3*xp[xi + xoff3] -
                                     App4*xp[xi + xoff4] -
                                     App5*xp[xi + xoff5])*AApd;
                              }
                           }
                        }
                        break;

                     case 5:
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii,jj,bi,xi,kk) HYPRE_SMP_SCHEDULE
#endif
                        for (kk = 0; kk < nk; kk++)
                        {
                           for (jj = 0; jj < nj; jj++)
                           {
                              ii = (kk + jj + redblack) % 2;
                              bi = bstart + kk*bnj*bni + jj*bni + ii;
                              xi = xstart + kk*xnj*xni + jj*xni + ii;
                              for (; ii < ni; ii+=2, bi+=2, xi+=2)
                              {
                                 xp[xi] =
                                    (bp[bi] -
                                     App0*xp[xi + xoff0] -
                                     App1*xp[xi + xoff1] -
                                     App2*xp[xi + xoff2] -
                                     App3*xp[xi + xoff3])*AApd;
                              }
                           }
                        }
                        break;

                     case 3:
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii,jj,bi,xi,kk) HYPRE_SMP_SCHEDULE
#endif
                        for (kk = 0; kk < nk; kk++)
                        {
                           for (jj = 0; jj < nj; jj++)
                           {
                              ii = (kk + jj + redblack) % 2;
                              bi = bstart + kk*bnj*bni + jj*bni + ii;
                              xi = xstart + kk*xnj*xni + jj*xni + ii;
                              for (; ii < ni; ii+=2, bi+=2, xi+=2)
                              {
                                 xp[xi] =
                                    (bp[bi] -
                                     App0*xp[xi + xoff0] -
                                     App1*xp[xi + xoff1])*AApd;
                              }
                           }
                        }
                        break;
                  }
               }   /* if (constant_coeff == 1) */

               else /* variable diagonal */
               {
                  Astart = hypre_BoxIndexRank(A_dbox, start);
                  Ani = hypre_BoxSizeX(A_dbox);
                  Anj = hypre_BoxSizeY(A_dbox);
                  switch(stencil_size)
                  {
                     case 7:
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii,jj,Ai,bi,xi,kk) HYPRE_SMP_SCHEDULE
#endif
                        for (kk = 0; kk < nk; kk++)
                        {
                           for (jj = 0; jj < nj; jj++)
                           {
                              ii = (kk + jj + redblack) % 2;
                              Ai = Astart + kk*Anj*Ani + jj*Ani + ii;
                              bi = bstart + kk*bnj*bni + jj*bni + ii;
                              xi = xstart + kk*xnj*xni + jj*xni + ii;
                              for (; ii < ni; ii+=2, Ai+=2, bi+=2, xi+=2)
                              {
                                 xp[xi] =
                                    (bp[bi] -
                                     App0*xp[xi + xoff0] -
                                     App1*xp[xi + xoff1] -
                                     App2*xp[xi + xoff2] -
                                     App3*xp[xi + xoff3] -
                                     App4*xp[xi + xoff4] -
                                     App5*xp[xi + xoff5]) / Ap[Ai];
                              }
                           }
                        }
                        break;

                     case 5:
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii,jj,Ai,bi,xi,kk) HYPRE_SMP_SCHEDULE
#endif
                        for (kk = 0; kk < nk; kk++)
                        {
                           for (jj = 0; jj < nj; jj++)
                           {
                              ii = (kk + jj + redblack) % 2;
                              Ai = Astart + kk*Anj*Ani + jj*Ani + ii;
                              bi = bstart + kk*bnj*bni + jj*bni + ii;
                              xi = xstart + kk*xnj*xni + jj*xni + ii;
                              for (; ii < ni; ii+=2, Ai+=2, bi+=2, xi+=2)
                              {
                                 xp[xi] =
                                    (bp[bi] -
                                     App0*xp[xi + xoff0] -
                                     App1*xp[xi + xoff1] -
                                     App2*xp[xi + xoff2] -
                                     App3*xp[xi + xoff3]) / Ap[Ai];
                              }
                           }
                        }
                        break;

                     case 3:
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii,jj,Ai,bi,xi,kk) HYPRE_SMP_SCHEDULE
#endif
                        for (kk = 0; kk < nk; kk++)
                        {
                           for (jj = 0; jj < nj; jj++)
                           {
                              ii = (kk + jj + redblack) % 2;
                              Ai = Astart + kk*Anj*Ani + jj*Ani + ii;
                              bi = bstart + kk*bnj*bni + jj*bni + ii;
                              xi = xstart + kk*xnj*xni + jj*xni + ii;
                              for (; ii < ni; ii+=2, Ai+=2, bi+=2, xi+=2)
                              {
                                 xp[xi] =
                                    (bp[bi] -
                                     App0*xp[xi + xoff0] -
                                     App1*xp[xi + xoff1]) / Ap[Ai];
                              }
                           }
                        }
                        break;
                  }  /* switch(stencil_size) */
               }    /* else */
            }
         }
      }

      rb = (rb + 1) % 2;
      iter++;
   }

   /* iter counted half-sweeps (red, then black) */
   (relax_data -> num_iterations) = iter / 2;

   /*-----------------------------------------------------------------------
    * Return
    *-----------------------------------------------------------------------*/

   hypre_IncFLOPCount(relax_data -> flops);
   hypre_EndTiming(relax_data -> time_index);

   return hypre_error_flag;
}
sparseBlocksJacobi.h
//
// Created by mbarb on 23/01/2018.
//

#ifndef PARALLELITERATIVE_SPARSEBLOCKSJACOBI_H
#define PARALLELITERATIVE_SPARSEBLOCKSJACOBI_H

#include "Eigen"
#include "utils.h"
#include "sparseParallelJacobi.h"


namespace Iterative {

    /**
     * Block-Jacobi solver for sparse systems: the matrix diagonal is split
     * into square blocks, each block is (pseudo-)inverted once up front via
     * an LDLT factorization, and the blocks are then iterated in parallel.
     * Converged blocks are removed from the work list as iterations proceed.
     *
     * NOTE(review): Eigen::ColumnVector, ulong/ulonglong and Index are
     * project-local aliases (presumably from utils.h) — confirm there.
     */
    template <typename Scalar>
    class sparseBlocksJacobi : public sparseParallelJacobi<Scalar> {

    public:
        /**
         *
         * @param A linear system matrix
         * @param b known term vector
         * @param iterations max number of iterations
         * @param tolerance min error tolerated
         * @param workers number of threads
         * @param blockSize size of the block
         */
        explicit sparseBlocksJacobi(
                const Eigen::SparseMatrix<Scalar>& A,
                const Eigen::ColumnVector<Scalar, Eigen::Dynamic>& b,
                const ulonglong iterations,
                const Scalar tolerance,
                const ulong workers = 0L,
                const ulonglong blockSize = 0L) :
                sparseParallelJacobi<Scalar>::sparseParallelJacobi(A, b, iterations, tolerance, workers) {

            this->blockSize = blockSize;
            // default: one block per worker (at least one column each)
            if (blockSize == 0)
                this->blockSize = std::max(ulong(this->A.cols() / workers), (ulong)1L);
            splitter();
        }

        /**
         * Run the block-Jacobi iteration.
         * @return reference to the solution vector held by the base class
         */
        const Eigen::ColumnVector<Scalar, Eigen::Dynamic> &solve() {

            Eigen::ColumnVector<Scalar, Eigen::Dynamic> oldSolution(this->solution);
            std::vector<Eigen::Matrix<Scalar,Eigen::Dynamic,Eigen::Dynamic>> inverses(blocks.size());
            Eigen::Matrix<Scalar,Eigen::Dynamic, Eigen::Dynamic> I(this->blockSize,this->blockSize);
            Eigen::SimplicialLDLT<Eigen::SparseMatrix<Scalar>> solver;
            I.setIdentity();

            // compute the inverses of the blocks and memorize it
            // (inverse obtained by solving block * X = I with LDLT)
            // NOTE(review): Eigen's block() signature is
            // (startRow, startCol, rows, cols); here startCol/startRow and
            // cols/rows are passed swapped.  Harmless for the diagonal
            // blocks produced by splitter() (startRow == startCol), but
            // verify if splitter() ever changes.
            #pragma omp parallel for firstprivate(I) private(solver)
            for (int i = 0; i < blocks.size()-1; ++i) {
                Eigen::SparseMatrix<Scalar> block = this->A.block(blocks[i].startCol, blocks[i].startRow,
                                                                  blocks[i].cols, blocks[i].rows);
                solver.compute(block);
                inverses[i] = solver.solve(I);
            }
            {
                // last block handled separately: it may be smaller than
                // blockSize when the matrix dimension is not a multiple
                Eigen::SparseMatrix<Scalar> block = this->A.block(blocks.back().startCol, blocks.back().startRow,
                                                                  blocks.back().cols, blocks.back().rows);
                if(block.cols()!=this->blockSize || block.rows()!=this->blockSize){
                    I.resize(block.rows(), block.cols());
                    I.setIdentity();
                }
                solver.compute(block);
                inverses.back() = solver.solve(I);
            }

            std::vector<int> index;  // indices of blocks that converged this sweep

            for (this->iteration=0L; this->iteration < this->iterations; ++this->iteration) {

                // each thread works on a private copy of oldSolution, so the
                // setZero() below only affects that thread's residual
                #pragma omp parallel for firstprivate(oldSolution) schedule(dynamic)
                for (int i = 0; i < inverses.size(); ++i) {
                    // set zero the components of the solution b that corresponds to the inverse
                    Eigen::ColumnVector<Scalar, Eigen::Dynamic> oldBlock =
                        oldSolution.segment(blocks[i].startCol, blocks[i].cols);
                    auto zeroBlock = oldSolution.segment(blocks[i].startCol, blocks[i].cols);
                    zeroBlock.setZero();
                    // the segment of the solution b that this inverse approximates
                    auto block = this->solution.segment(blocks[i].startCol, blocks[i].cols);
                    // approximate the solution using the inverse and the solution at the previous iteration
                    block = inverses[i] *
                        (this->b - (this->A * oldSolution)).segment(blocks[i].startCol, blocks[i].cols);
                    zeroBlock = block;
                    // block converged when its L1 change is within tolerance
                    if ((oldBlock - block).template lpNorm<1>() <= this->tolerance*block.size()) {
                        #pragma omp critical
                        index.emplace_back(i);
                    }
                }
                if (!index.empty()) {
                    // erase from the back so earlier indices stay valid
                    std::sort(index.rbegin(), index.rend());
                    for (auto i : index) {
                        blocks.erase(blocks.begin() + i);
                        inverses.erase(inverses.begin() + i);
                    }
                    if (inverses.empty()) break;
                    index.clear();
                }
                std::swap(this->solution, oldSolution);
            }
            std::cout << this->iteration << std::endl;
            return this->solution;
        }

    protected:
        ulonglong blockSize;          // edge length of each diagonal block
        std::vector<Index> blocks;    // diagonal block descriptors

        /** Partition the diagonal into square blocks of blockSize
         *  (the last block is clamped to the matrix bounds). */
        void splitter() {
            for (ulonglong i = 0; i < this->A.cols(); i += blockSize) {
                blocks.emplace_back(Index(i, std::min(blockSize, (ulonglong)this->A.cols() - i), i,
                                          std::min(blockSize, (ulonglong)this->A.rows() - i)));
            }
        }

    private:

    };
}

#endif //PARALLELITERATIVE_BLOCKSJACOBI_H
main.c
#include <stdio.h>
#include <omp.h>

/*
 * OpenMP demo: set the team size with omp_set_num_threads(), have each
 * thread report its id, and let thread 0 report the actual team size.
 *
 * Fixes relative to the original:
 *  - omp_get_num_threads() returns 1 when called outside a parallel
 *    region, so the serial-region queries now use omp_get_max_threads(),
 *    which reports the size the next parallel region will use.
 *  - the second parallel region was missing private(threadID); with the
 *    variable shared, concurrent writes were a data race and threads
 *    could print each other's ids.
 */
int main () {
  int numberOfThreads, threadID;

  printf("Setting a fixed number of threads. In this case 8\n");
  omp_set_num_threads(8);

  /* In a serial region omp_get_num_threads() is always 1; query the
   * upper bound for the next team instead. */
  numberOfThreads = omp_get_max_threads();
  printf("The total number of threads is %d\n", numberOfThreads);

#pragma omp parallel private(threadID)
  {
    /* each thread knows its ID using threadID as private */
    threadID = omp_get_thread_num();
    printf("Hello! my ID is %d\n", threadID);

    if (threadID == 0) {
      /* inside the region this returns the real team size */
      numberOfThreads = omp_get_num_threads();
      printf("I am the thread 0 and the total numer is %d\n", numberOfThreads);
    }
  }

  printf("Now we use 5 threads\n");
  omp_set_num_threads(5);
  numberOfThreads = omp_get_max_threads();
  printf("Total number of threads %d\n", numberOfThreads);

#pragma omp parallel private(threadID) /* private() was missing: data race */
  {
    threadID = omp_get_thread_num();
    printf("Hello! my ID is %d\n", threadID);

    if (threadID == 0) {
      numberOfThreads = omp_get_num_threads();
      printf("I am the thread 0 and the total numer is %d\n", numberOfThreads);
    }
  }
  return 0;
}
par_csr_matop.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include "_hypre_utilities.h"
#include "hypre_hopscotch_hash.h"
#include "_hypre_parcsr_mv.h"
#include "_hypre_lapack.h"
#include "_hypre_blas.h"

/* The following function was formerly part of hypre_ParMatmul
   but was removed so it can also be used for multiplication of
   Boolean matrices.

   Symbolic phase of the parallel sparse matrix product C = A*B:
   counts the nonzeros of each row of C (diag and offd parts
   separately) and fills C_diag_i / C_offd_i (CSR row pointers),
   returning the total nonzero counts in *C_diag_size / *C_offd_size.
   Rows of A are partitioned evenly across threads; per-row counts are
   then turned into global row pointers by a prefix-sum step below.
*/

void hypre_ParMatmul_RowSizes( HYPRE_MemoryLocation memory_location,
                               HYPRE_Int ** C_diag_i,
                               HYPRE_Int ** C_offd_i,
                               /*HYPRE_Int ** B_marker,*/
                               HYPRE_Int * A_diag_i,
                               HYPRE_Int * A_diag_j,
                               HYPRE_Int * A_offd_i,
                               HYPRE_Int * A_offd_j,
                               HYPRE_Int * B_diag_i,
                               HYPRE_Int * B_diag_j,
                               HYPRE_Int * B_offd_i,
                               HYPRE_Int * B_offd_j,
                               HYPRE_Int * B_ext_diag_i,
                               HYPRE_Int * B_ext_diag_j,
                               HYPRE_Int * B_ext_offd_i,
                               HYPRE_Int * B_ext_offd_j,
                               HYPRE_Int * map_B_to_C,
                               HYPRE_Int *C_diag_size,
                               HYPRE_Int *C_offd_size,
                               HYPRE_Int num_rows_diag_A,
                               HYPRE_Int num_cols_offd_A,
                               HYPRE_Int allsquare,
                               HYPRE_Int num_cols_diag_B,
                               HYPRE_Int num_cols_offd_B,
                               HYPRE_Int num_cols_offd_C )
{
   HYPRE_Int i1, i2, i3, jj2, jj3;
   HYPRE_Int jj_count_diag, jj_count_offd, jj_row_begin_diag, jj_row_begin_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for C_data at 0 */
   HYPRE_Int num_threads = hypre_NumThreads();
   HYPRE_Int *jj_count_diag_array;
   HYPRE_Int *jj_count_offd_array;
   HYPRE_Int ii, size, rest;

   /* First pass begins here.  Computes sizes of C rows.
      Arrays computed: C_diag_i, C_offd_i, B_marker
      Arrays needed: (11, all HYPRE_Int*)
        A_diag_i, A_diag_j, A_offd_i, A_offd_j,
        B_diag_i, B_diag_j, B_offd_i, B_offd_j,
        B_ext_i, B_ext_j, col_map_offd_B,
        col_map_offd_B, B_offd_i, B_offd_j, B_ext_i, B_ext_j,
      Scalars computed: C_diag_size, C_offd_size
      Scalars needed:
      num_rows_diag_A, num_rows_diag_A, num_cols_offd_A, allsquare,
      first_col_diag_B, n_cols_B, num_cols_offd_B, num_cols_diag_B
   */

   *C_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A+1, memory_location);
   *C_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A+1, memory_location);

   /* per-thread nonzero counts, combined by the prefix-sum step below */
   jj_count_diag_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------------
    *  Loop over rows of A
    *-----------------------------------------------------------------------*/
   size = num_rows_diag_A/num_threads;
   rest = num_rows_diag_A - size*num_threads;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(ii, i1, jj_row_begin_diag, jj_row_begin_offd, jj_count_diag, jj_count_offd, jj2, i2, jj3, i3)
#endif
   /*for (ii=0; ii < num_threads; ii++)*/
   {
      HYPRE_Int *B_marker = NULL;
      HYPRE_Int ns, ne;

      /* [ns, ne): this thread's contiguous slice of A's rows; the first
         `rest` threads take one extra row each */
      ii = hypre_GetThreadNum();
      if (ii < rest)
      {
         ns = ii*size+ii;
         ne = (ii+1)*size+ii+1;
      }
      else
      {
         ns = ii*size+rest;
         ne = (ii+1)*size+rest;
      }
      jj_count_diag = start_indexing;
      jj_count_offd = start_indexing;

      /* B_marker[col] remembers at which count the column was last seen,
         so duplicates within a row of C are counted only once */
      if (num_cols_diag_B || num_cols_offd_C)
      B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B+num_cols_offd_C,
                               HYPRE_MEMORY_HOST);
      for (i1 = 0; i1 < num_cols_diag_B+num_cols_offd_C; i1++)
         B_marker[i1] = -1;

      for (i1 = ns; i1 < ne; i1++)
      {
         /*--------------------------------------------------------------------
          *  Set marker for diagonal entry, C_{i1,i1} (for square matrices).
          *--------------------------------------------------------------------*/

         jj_row_begin_diag = jj_count_diag;
         jj_row_begin_offd = jj_count_offd;
         if ( allsquare )
         {
            B_marker[i1] = jj_count_diag;
            jj_count_diag++;
         }

         /*-----------------------------------------------------------------
          *  Loop over entries in row i1 of A_offd.
          *-----------------------------------------------------------------*/

         if (num_cols_offd_A)
         {
            for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++)
            {
               i2 = A_offd_j[jj2];

               /*-----------------------------------------------------------
                *  Loop over entries in row i2 of B_ext.
                *-----------------------------------------------------------*/

               for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2+1]; jj3++)
               {
                  /* offd columns are stored after the diag columns in
                     the marker, hence the num_cols_diag_B shift */
                  i3 = num_cols_diag_B+B_ext_offd_j[jj3];

                  /*--------------------------------------------------------
                   *  Check B_marker to see that C_{i1,i3} has not already
                   *  been accounted for. If it has not, mark it and increment
                   *  counter.
                   *--------------------------------------------------------*/

                  if (B_marker[i3] < jj_row_begin_offd)
                  {
                     B_marker[i3] = jj_count_offd;
                     jj_count_offd++;
                  }
               }
               for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2+1]; jj3++)
               {
                  i3 = B_ext_diag_j[jj3];
                  if (B_marker[i3] < jj_row_begin_diag)
                  {
                     B_marker[i3] = jj_count_diag;
                     jj_count_diag++;
                  }
               }
            }
         }

         /*-----------------------------------------------------------------
          *  Loop over entries in row i1 of A_diag.
          *-----------------------------------------------------------------*/

         for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++)
         {
            i2 = A_diag_j[jj2];

            /*-----------------------------------------------------------
             *  Loop over entries in row i2 of B_diag.
             *-----------------------------------------------------------*/

            for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2+1]; jj3++)
            {
               i3 = B_diag_j[jj3];

               /*--------------------------------------------------------
                *  Check B_marker to see that C_{i1,i3} has not already
                *  been accounted for. If it has not, mark it and increment
                *  counter.
                *--------------------------------------------------------*/

               if (B_marker[i3] < jj_row_begin_diag)
               {
                  B_marker[i3] = jj_count_diag;
                  jj_count_diag++;
               }
            }

            /*-----------------------------------------------------------
             *  Loop over entries in row i2 of B_offd.
             *-----------------------------------------------------------*/

            if (num_cols_offd_B)
            {
               for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2+1]; jj3++)
               {
                  i3 = num_cols_diag_B+map_B_to_C[B_offd_j[jj3]];

                  /*--------------------------------------------------------
                   *  Check B_marker to see that C_{i1,i3} has not already
                   *  been accounted for. If it has not, mark it and increment
                   *  counter.
                   *--------------------------------------------------------*/

                  if (B_marker[i3] < jj_row_begin_offd)
                  {
                     B_marker[i3] = jj_count_offd;
                     jj_count_offd++;
                  }
               }
            }
         }

         /*--------------------------------------------------------------------
          * Set C_diag_i and C_offd_i for this row.
          *--------------------------------------------------------------------*/

         (*C_diag_i)[i1] = jj_row_begin_diag;
         (*C_offd_i)[i1] = jj_row_begin_offd;
      }
      jj_count_diag_array[ii] = jj_count_diag;
      jj_count_offd_array[ii] = jj_count_offd;

      hypre_TFree(B_marker, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* prefix-sum step: shift each thread's local row pointers by the
         totals of all lower-numbered threads; thread 0 meanwhile sums the
         grand totals into the last row-pointer entry */
      if (ii)
      {
         jj_count_diag = jj_count_diag_array[0];
         jj_count_offd = jj_count_offd_array[0];
         for (i1 = 1; i1 < ii; i1++)
         {
            jj_count_diag += jj_count_diag_array[i1];
            jj_count_offd += jj_count_offd_array[i1];
         }

         for (i1 = ns; i1 < ne; i1++)
         {
            (*C_diag_i)[i1] += jj_count_diag;
            (*C_offd_i)[i1] += jj_count_offd;
         }
      }
      else
      {
         (*C_diag_i)[num_rows_diag_A] = 0;
         (*C_offd_i)[num_rows_diag_A] = 0;
         for (i1 = 0; i1 < num_threads; i1++)
         {
            (*C_diag_i)[num_rows_diag_A] += jj_count_diag_array[i1];
            (*C_offd_i)[num_rows_diag_A] += jj_count_offd_array[i1];
         }
      }
   } /* end parallel loop */

   /*-----------------------------------------------------------------------
    *  Allocate C_diag_data and C_diag_j arrays.
    *  Allocate C_offd_data and C_offd_j arrays.
    *-----------------------------------------------------------------------*/

   *C_diag_size = (*C_diag_i)[num_rows_diag_A];
   *C_offd_size = (*C_offd_i)[num_rows_diag_A];

   hypre_TFree(jj_count_diag_array, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd_array, HYPRE_MEMORY_HOST);

   /* End of First Pass */
}

/*--------------------------------------------------------------------------
 * hypre_ParMatmul : multiplies two ParCSRMatrices A and B and returns
 * the product in ParCSRMatrix C
 * Note that C does not own the partitionings since its row_starts
 * is owned by A and col_starts by B.
 *--------------------------------------------------------------------------*/

hypre_ParCSRMatrix*
hypre_ParMatmul( hypre_ParCSRMatrix *A,
                 hypre_ParCSRMatrix *B )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MATMUL] -= hypre_MPI_Wtime();
#endif

   MPI_Comm           comm = hypre_ParCSRMatrixComm(A);

   hypre_CSRMatrix   *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Complex     *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int         *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int         *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix   *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Complex     *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int         *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int         *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_BigInt      *row_starts_A = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_Int          num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int          num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag);
   HYPRE_Int          num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);

   hypre_CSRMatrix   *B_diag = hypre_ParCSRMatrixDiag(B);
   HYPRE_Complex     *B_diag_data = hypre_CSRMatrixData(B_diag);
   HYPRE_Int         *B_diag_i = hypre_CSRMatrixI(B_diag);
   HYPRE_Int         *B_diag_j = hypre_CSRMatrixJ(B_diag);
   hypre_CSRMatrix   *B_offd = hypre_ParCSRMatrixOffd(B);
   HYPRE_BigInt      *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);
   HYPRE_Complex     *B_offd_data = hypre_CSRMatrixData(B_offd);
   HYPRE_Int         *B_offd_i = hypre_CSRMatrixI(B_offd);
   HYPRE_Int         *B_offd_j =
hypre_CSRMatrixJ(B_offd); HYPRE_BigInt first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B); HYPRE_BigInt last_col_diag_B; HYPRE_BigInt *col_starts_B = hypre_ParCSRMatrixColStarts(B); HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag); HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); hypre_ParCSRMatrix *C; HYPRE_BigInt *col_map_offd_C; HYPRE_Int *map_B_to_C=NULL; hypre_CSRMatrix *C_diag; HYPRE_Complex *C_diag_data; HYPRE_Int *C_diag_i; HYPRE_Int *C_diag_j; hypre_CSRMatrix *C_offd; HYPRE_Complex *C_offd_data=NULL; HYPRE_Int *C_offd_i=NULL; HYPRE_Int *C_offd_j=NULL; HYPRE_Int C_diag_size; HYPRE_Int C_offd_size; HYPRE_Int num_cols_offd_C = 0; hypre_CSRMatrix *Bs_ext; HYPRE_Complex *Bs_ext_data; HYPRE_Int *Bs_ext_i; HYPRE_BigInt *Bs_ext_j; HYPRE_Complex *B_ext_diag_data; HYPRE_Int *B_ext_diag_i; HYPRE_Int *B_ext_diag_j; HYPRE_Int B_ext_diag_size; HYPRE_Complex *B_ext_offd_data; HYPRE_Int *B_ext_offd_i; HYPRE_Int *B_ext_offd_j; HYPRE_BigInt *B_big_offd_j = NULL; HYPRE_Int B_ext_offd_size; HYPRE_BigInt n_rows_A, n_cols_A; HYPRE_BigInt n_rows_B, n_cols_B; HYPRE_Int allsquare = 0; HYPRE_Int num_procs; HYPRE_Int *my_diag_array; HYPRE_Int *my_offd_array; HYPRE_Int max_num_threads; HYPRE_Complex zero = 0.0; HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A); HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B); /* RL: TODO cannot guarantee, maybe should never assert hypre_assert(memory_location_A == memory_location_B); */ /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D, * not sure if this is the right thing to do. 
* Also, need something like this in other places * TODO */ HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B); n_rows_A = hypre_ParCSRMatrixGlobalNumRows(A); n_cols_A = hypre_ParCSRMatrixGlobalNumCols(A); n_rows_B = hypre_ParCSRMatrixGlobalNumRows(B); n_cols_B = hypre_ParCSRMatrixGlobalNumCols(B); max_num_threads = hypre_NumThreads(); my_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); my_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); if (n_cols_A != n_rows_B || num_cols_diag_A != num_rows_diag_B) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! Incompatible matrix dimensions!\n"); return NULL; } /* if globally C=A*B is square and locally C_diag should also be square */ if ( num_rows_diag_A == num_cols_diag_B && n_rows_A == n_cols_B ) { allsquare = 1; } /*----------------------------------------------------------------------- * Extract B_ext, i.e. portion of B that is stored on neighbor procs * and needed locally for matrix matrix product *-----------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm, &num_procs); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime(); #endif if (num_procs > 1) { /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings within * hypre_ParCSRMatrixExtractBExt *--------------------------------------------------------------------*/ Bs_ext = hypre_ParCSRMatrixExtractBExt(B,A,1); Bs_ext_data = hypre_CSRMatrixData(Bs_ext); Bs_ext_i = hypre_CSRMatrixI(Bs_ext); Bs_ext_j = hypre_CSRMatrixBigJ(Bs_ext); } B_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST); B_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST); B_ext_diag_size = 0; B_ext_offd_size = 0; last_col_diag_B = first_col_diag_B + 
(HYPRE_BigInt)num_cols_diag_B -1; #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_UnorderedBigIntSet set; #pragma omp parallel { HYPRE_Int size, rest, ii; HYPRE_Int ns, ne; HYPRE_Int i1, i, j; HYPRE_Int my_offd_size, my_diag_size; HYPRE_Int cnt_offd, cnt_diag; HYPRE_Int num_threads = hypre_NumActiveThreads(); size = num_cols_offd_A/num_threads; rest = num_cols_offd_A - size*num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } my_diag_size = 0; my_offd_size = 0; for (i=ns; i < ne; i++) { B_ext_diag_i[i] = my_diag_size; B_ext_offd_i[i] = my_offd_size; for (j=Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++) if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) my_offd_size++; else my_diag_size++; } my_diag_array[ii] = my_diag_size; my_offd_array[ii] = my_offd_size; #pragma omp barrier if (ii) { my_diag_size = my_diag_array[0]; my_offd_size = my_offd_array[0]; for (i1 = 1; i1 < ii; i1++) { my_diag_size += my_diag_array[i1]; my_offd_size += my_offd_array[i1]; } for (i1 = ns; i1 < ne; i1++) { B_ext_diag_i[i1] += my_diag_size; B_ext_offd_i[i1] += my_offd_size; } } else { B_ext_diag_size = 0; B_ext_offd_size = 0; for (i1 = 0; i1 < num_threads; i1++) { B_ext_diag_size += my_diag_array[i1]; B_ext_offd_size += my_offd_array[i1]; } B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size; B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size; if (B_ext_diag_size) { B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST); B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST); } if (B_ext_offd_size) { B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST); B_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size, HYPRE_MEMORY_HOST); B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST); } hypre_UnorderedBigIntSetCreate(&set, B_ext_offd_size + num_cols_offd_B, 16*hypre_NumThreads()); } #pragma omp barrier 
cnt_offd = B_ext_offd_i[ns]; cnt_diag = B_ext_diag_i[ns]; for (i=ns; i < ne; i++) { for (j=Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++) if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) { hypre_UnorderedBigIntSetPut(&set, Bs_ext_j[j]); B_big_offd_j[cnt_offd] = Bs_ext_j[j]; //Bs_ext_j[cnt_offd] = Bs_ext_j[j]; B_ext_offd_data[cnt_offd++] = Bs_ext_data[j]; } else { B_ext_diag_j[cnt_diag] = (HYPRE_Int)(Bs_ext_j[j] - first_col_diag_B); B_ext_diag_data[cnt_diag++] = Bs_ext_data[j]; } } HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_B); for (i = i_begin; i < i_end; i++) { hypre_UnorderedBigIntSetPut(&set, col_map_offd_B[i]); } } /* omp parallel */ col_map_offd_C = hypre_UnorderedBigIntSetCopyToArray(&set, &num_cols_offd_C); hypre_UnorderedBigIntSetDestroy(&set); hypre_UnorderedBigIntMap col_map_offd_C_inverse; hypre_big_sort_and_create_inverse_map(col_map_offd_C, num_cols_offd_C, &col_map_offd_C, &col_map_offd_C_inverse); HYPRE_Int i, j; #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE for (i = 0; i < num_cols_offd_A; i++) for (j=B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++) //B_ext_offd_j[j] = hypre_UnorderedIntMapGet(&col_map_offd_C_inverse, B_ext_offd_j[j]); B_ext_offd_j[j] = hypre_UnorderedBigIntMapGet(&col_map_offd_C_inverse, B_big_offd_j[j]); if (num_cols_offd_C) { hypre_UnorderedBigIntMapDestroy(&col_map_offd_C_inverse); } hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST); hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST); if (num_cols_offd_B) { HYPRE_Int i; map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST); #pragma omp parallel private(i) { HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_C); HYPRE_Int cnt; if (i_end > i_begin) { cnt = hypre_BigLowerBound(col_map_offd_B, col_map_offd_B + (HYPRE_BigInt)num_cols_offd_B, col_map_offd_C[i_begin]) - col_map_offd_B; } for (i = i_begin; i < i_end && cnt < num_cols_offd_B; i++) { if (col_map_offd_C[i] == 
col_map_offd_B[cnt]) { map_B_to_C[cnt++] = i; } } } } if (num_procs > 1) { hypre_CSRMatrixDestroy(Bs_ext); Bs_ext = NULL; } #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ HYPRE_BigInt *temp; #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int size, rest, ii; HYPRE_Int ns, ne; HYPRE_Int i1, i, j; HYPRE_Int my_offd_size, my_diag_size; HYPRE_Int cnt_offd, cnt_diag; HYPRE_Int num_threads = hypre_NumActiveThreads(); size = num_cols_offd_A/num_threads; rest = num_cols_offd_A - size*num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } my_diag_size = 0; my_offd_size = 0; for (i=ns; i < ne; i++) { B_ext_diag_i[i] = my_diag_size; B_ext_offd_i[i] = my_offd_size; for (j=Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++) if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) my_offd_size++; else my_diag_size++; } my_diag_array[ii] = my_diag_size; my_offd_array[ii] = my_offd_size; #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii) { my_diag_size = my_diag_array[0]; my_offd_size = my_offd_array[0]; for (i1 = 1; i1 < ii; i1++) { my_diag_size += my_diag_array[i1]; my_offd_size += my_offd_array[i1]; } for (i1 = ns; i1 < ne; i1++) { B_ext_diag_i[i1] += my_diag_size; B_ext_offd_i[i1] += my_offd_size; } } else { B_ext_diag_size = 0; B_ext_offd_size = 0; for (i1 = 0; i1 < num_threads; i1++) { B_ext_diag_size += my_diag_array[i1]; B_ext_offd_size += my_offd_array[i1]; } B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size; B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size; if (B_ext_diag_size) { B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST); B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST); } if (B_ext_offd_size) { B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST); B_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size, HYPRE_MEMORY_HOST); B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, 
B_ext_offd_size, HYPRE_MEMORY_HOST); } if (B_ext_offd_size || num_cols_offd_B) temp = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size+num_cols_offd_B, HYPRE_MEMORY_HOST); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif cnt_offd = B_ext_offd_i[ns]; cnt_diag = B_ext_diag_i[ns]; for (i=ns; i < ne; i++) { for (j=Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++) if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) { temp[cnt_offd] = Bs_ext_j[j]; B_big_offd_j[cnt_offd] = Bs_ext_j[j]; //Bs_ext_j[cnt_offd] = Bs_ext_j[j]; B_ext_offd_data[cnt_offd++] = Bs_ext_data[j]; } else { B_ext_diag_j[cnt_diag] = (HYPRE_Int)(Bs_ext_j[j] - first_col_diag_B); B_ext_diag_data[cnt_diag++] = Bs_ext_data[j]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii == 0) { HYPRE_Int cnt; if (num_procs > 1) { hypre_CSRMatrixDestroy(Bs_ext); Bs_ext = NULL; } cnt = 0; if (B_ext_offd_size || num_cols_offd_B) { cnt = B_ext_offd_size; for (i=0; i < num_cols_offd_B; i++) temp[cnt++] = col_map_offd_B[i]; if (cnt) { HYPRE_BigInt value; hypre_BigQsort0(temp, 0, cnt-1); num_cols_offd_C = 1; value = temp[0]; for (i=1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_C++] = value; } } } if (num_cols_offd_C) col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd_C; i++) col_map_offd_C[i] = temp[i]; hypre_TFree(temp, HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (i=ns; i < ne; i++) for (j=B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++) B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, B_big_offd_j[j], //B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, Bs_ext_j[j], num_cols_offd_C); } /* end parallel region */ hypre_TFree(B_big_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST); hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST); if (num_cols_offd_B) { HYPRE_Int i, cnt; map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST); cnt = 0; 
for (i=0; i < num_cols_offd_C; i++) if (col_map_offd_C[i] == col_map_offd_B[cnt]) { map_B_to_C[cnt++] = i; if (cnt == num_cols_offd_B) break; } } #endif /* !HYPRE_CONCURRENT_HOPSCOTCH */ #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime(); #endif hypre_ParMatmul_RowSizes( /*&C_diag_i, &C_offd_i, &B_marker,*/ memory_location_C, &C_diag_i, &C_offd_i, A_diag_i, A_diag_j, A_offd_i, A_offd_j, B_diag_i, B_diag_j, B_offd_i, B_offd_j, B_ext_diag_i, B_ext_diag_j, B_ext_offd_i, B_ext_offd_j, map_B_to_C, &C_diag_size, &C_offd_size, num_rows_diag_A, num_cols_offd_A, allsquare, num_cols_diag_B, num_cols_offd_B, num_cols_offd_C ); /*----------------------------------------------------------------------- * Allocate C_diag_data and C_diag_j arrays. * Allocate C_offd_data and C_offd_j arrays. *-----------------------------------------------------------------------*/ last_col_diag_B = first_col_diag_B + (HYPRE_BigInt)num_cols_diag_B - 1; C_diag_data = hypre_CTAlloc(HYPRE_Complex, C_diag_size, memory_location_C); C_diag_j = hypre_CTAlloc(HYPRE_Int, C_diag_size, memory_location_C); if (C_offd_size) { C_offd_data = hypre_CTAlloc(HYPRE_Complex, C_offd_size, memory_location_C); C_offd_j = hypre_CTAlloc(HYPRE_Int, C_offd_size, memory_location_C); } /*----------------------------------------------------------------------- * Second Pass: Fill in C_diag_data and C_diag_j. * Second Pass: Fill in C_offd_data and C_offd_j. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Initialize some stuff. 
*-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int *B_marker = NULL; HYPRE_Int ns, ne, size, rest, ii; HYPRE_Int i1, i2, i3, jj2, jj3; HYPRE_Int jj_row_begin_diag, jj_count_diag; HYPRE_Int jj_row_begin_offd, jj_count_offd; HYPRE_Int num_threads; HYPRE_Complex a_entry; /*, a_b_product;*/ ii = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); size = num_rows_diag_A/num_threads; rest = num_rows_diag_A - size*num_threads; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } jj_count_diag = C_diag_i[ns]; jj_count_offd = C_offd_i[ns]; if (num_cols_diag_B || num_cols_offd_C) { B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B+num_cols_offd_C, HYPRE_MEMORY_HOST); } for (i1 = 0; i1 < num_cols_diag_B+num_cols_offd_C; i1++) { B_marker[i1] = -1; } /*----------------------------------------------------------------------- * Loop over interior c-points. *-----------------------------------------------------------------------*/ for (i1 = ns; i1 < ne; i1++) { /*-------------------------------------------------------------------- * Create diagonal entry, C_{i1,i1} *--------------------------------------------------------------------*/ jj_row_begin_diag = jj_count_diag; jj_row_begin_offd = jj_count_offd; if ( allsquare ) { B_marker[i1] = jj_count_diag; C_diag_data[jj_count_diag] = zero; C_diag_j[jj_count_diag] = i1; jj_count_diag++; } /*----------------------------------------------------------------- * Loop over entries in row i1 of A_offd. *-----------------------------------------------------------------*/ if (num_cols_offd_A) { for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++) { i2 = A_offd_j[jj2]; a_entry = A_offd_data[jj2]; /*----------------------------------------------------------- * Loop over entries in row i2 of B_ext. 
*-----------------------------------------------------------*/ for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2+1]; jj3++) { i3 = num_cols_diag_B+B_ext_offd_j[jj3]; /*-------------------------------------------------------- * Check B_marker to see that C_{i1,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_offd) { B_marker[i3] = jj_count_offd; C_offd_data[jj_count_offd] = a_entry*B_ext_offd_data[jj3]; C_offd_j[jj_count_offd] = i3-num_cols_diag_B; jj_count_offd++; } else C_offd_data[B_marker[i3]] += a_entry*B_ext_offd_data[jj3]; } for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2+1]; jj3++) { i3 = B_ext_diag_j[jj3]; if (B_marker[i3] < jj_row_begin_diag) { B_marker[i3] = jj_count_diag; C_diag_data[jj_count_diag] = a_entry*B_ext_diag_data[jj3]; C_diag_j[jj_count_diag] = i3; jj_count_diag++; } else C_diag_data[B_marker[i3]] += a_entry*B_ext_diag_data[jj3]; } } } /*----------------------------------------------------------------- * Loop over entries in row i1 of A_diag. *-----------------------------------------------------------------*/ for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++) { i2 = A_diag_j[jj2]; a_entry = A_diag_data[jj2]; /*----------------------------------------------------------- * Loop over entries in row i2 of B_diag. *-----------------------------------------------------------*/ for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2+1]; jj3++) { i3 = B_diag_j[jj3]; /*-------------------------------------------------------- * Check B_marker to see that C_{i1,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. 
*--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_diag) { B_marker[i3] = jj_count_diag; C_diag_data[jj_count_diag] = a_entry*B_diag_data[jj3]; C_diag_j[jj_count_diag] = i3; jj_count_diag++; } else { C_diag_data[B_marker[i3]] += a_entry*B_diag_data[jj3]; } } if (num_cols_offd_B) { for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2+1]; jj3++) { i3 = num_cols_diag_B+map_B_to_C[B_offd_j[jj3]]; /*-------------------------------------------------------- * Check B_marker to see that C_{i1,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_offd) { B_marker[i3] = jj_count_offd; C_offd_data[jj_count_offd] = a_entry*B_offd_data[jj3]; C_offd_j[jj_count_offd] = i3-num_cols_diag_B; jj_count_offd++; } else { C_offd_data[B_marker[i3]] += a_entry*B_offd_data[jj3]; } } } } } hypre_TFree(B_marker, HYPRE_MEMORY_HOST); } /*end parallel region */ C = hypre_ParCSRMatrixCreate(comm, n_rows_A, n_cols_B, row_starts_A, col_starts_B, num_cols_offd_C, C_diag_size, C_offd_size); /* Note that C does not own the partitionings */ hypre_ParCSRMatrixSetRowStartsOwner(C, 0); hypre_ParCSRMatrixSetColStartsOwner(C, 0); C_diag = hypre_ParCSRMatrixDiag(C); hypre_CSRMatrixData(C_diag) = C_diag_data; hypre_CSRMatrixI(C_diag) = C_diag_i; hypre_CSRMatrixJ(C_diag) = C_diag_j; C_offd = hypre_ParCSRMatrixOffd(C); hypre_CSRMatrixI(C_offd) = C_offd_i; hypre_ParCSRMatrixOffd(C) = C_offd; if (num_cols_offd_C) { hypre_CSRMatrixData(C_offd) = C_offd_data; hypre_CSRMatrixJ(C_offd) = C_offd_j; hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C; } hypre_CSRMatrixMemoryLocation(C_diag) = memory_location_C; hypre_CSRMatrixMemoryLocation(C_offd) = memory_location_C; /*----------------------------------------------------------------------- * Free various arrays *-----------------------------------------------------------------------*/ 
hypre_TFree(B_ext_diag_i, HYPRE_MEMORY_HOST); if (B_ext_diag_size) { hypre_TFree(B_ext_diag_j, HYPRE_MEMORY_HOST); hypre_TFree(B_ext_diag_data, HYPRE_MEMORY_HOST); } hypre_TFree(B_ext_offd_i, HYPRE_MEMORY_HOST); if (B_ext_offd_size) { hypre_TFree(B_ext_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(B_ext_offd_data, HYPRE_MEMORY_HOST); } if (num_cols_offd_B) hypre_TFree(map_B_to_C, HYPRE_MEMORY_HOST); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MATMUL] += hypre_MPI_Wtime(); #endif return C; } /* The following function was formerly part of hypre_ParCSRMatrixExtractBExt but the code was removed so it can be used for a corresponding function for Boolean matrices JSP: to allow communication overlapping, it returns comm_handle_idx and comm_handle_data. Before accessing B, they should be destroyed (including send_data contained in the comm_handle). */ void hypre_ParCSRMatrixExtractBExt_Arrays_Overlap( HYPRE_Int ** pB_ext_i, HYPRE_BigInt ** pB_ext_j, HYPRE_Complex ** pB_ext_data, HYPRE_BigInt ** pB_ext_row_map, HYPRE_Int * num_nonzeros, HYPRE_Int data, HYPRE_Int find_row_map, MPI_Comm comm, hypre_ParCSRCommPkg * comm_pkg, HYPRE_Int num_cols_B, HYPRE_Int num_recvs, HYPRE_Int num_sends, HYPRE_BigInt first_col_diag, HYPRE_BigInt * row_starts, HYPRE_Int * recv_vec_starts, HYPRE_Int * send_map_starts, HYPRE_Int * send_map_elmts, HYPRE_Int * diag_i, HYPRE_Int * diag_j, HYPRE_Int * offd_i, HYPRE_Int * offd_j, HYPRE_BigInt * col_map_offd, HYPRE_Real * diag_data, HYPRE_Real * offd_data, hypre_ParCSRCommHandle **comm_handle_idx, hypre_ParCSRCommHandle **comm_handle_data, HYPRE_Int *CF_marker, HYPRE_Int *CF_marker_offd, HYPRE_Int skip_fine, /* 1 if only coarse points are needed */ HYPRE_Int skip_same_sign /* 1 if only points that have the same sign are needed */ // extended based long range interpolation: skip_fine = 1, skip_same_sign = 0 for S matrix, skip_fine = 1, skip_same_sign = 1 for A matrix // other interpolation: skip_fine = 0, skip_same_sign = 0 ) { 
hypre_ParCSRCommHandle *comm_handle, *row_map_comm_handle = NULL;
   hypre_ParCSRCommPkg *tmp_comm_pkg;      /* temporary pkg for the j/data exchange */
   HYPRE_Int *B_int_i;                     /* per-row lengths of rows we send */
   HYPRE_BigInt *B_int_j;                  /* global column ids we send */
   HYPRE_Int *B_ext_i;                     /* row pointer of the received matrix */
   HYPRE_BigInt * B_ext_j;
   HYPRE_Complex * B_ext_data;
   HYPRE_Complex * B_int_data;
   HYPRE_BigInt * B_int_row_map;
   HYPRE_BigInt * B_ext_row_map;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int *jdata_recv_vec_starts;
   HYPRE_Int *jdata_send_map_starts;
   HYPRE_Int i, j, k;
   HYPRE_Int start_index;
   /*HYPRE_Int jrow;*/
   HYPRE_Int num_rows_B_ext;
   HYPRE_Int *prefix_sum_workspace;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_BigInt first_row_index = row_starts[0];
#else
   HYPRE_BigInt first_row_index = row_starts[my_id];
   HYPRE_Int *send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
#endif

   num_rows_B_ext = recv_vec_starts[num_recvs];
   if ( num_rows_B_ext < 0 )
   {
      /* no B_ext, no communication */
      *pB_ext_i = NULL;
      *pB_ext_j = NULL;
      if ( data ) *pB_ext_data = NULL;
      if ( find_row_map ) *pB_ext_row_map = NULL;
      *num_nonzeros = 0;
      return;
   };
   B_int_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends]+1, HYPRE_MEMORY_HOST);
   B_ext_i = hypre_CTAlloc(HYPRE_Int, num_rows_B_ext+1, HYPRE_MEMORY_HOST);
   *pB_ext_i = B_ext_i;
   if ( find_row_map )
   {
      B_int_row_map = hypre_CTAlloc( HYPRE_BigInt, send_map_starts[num_sends]+1 , HYPRE_MEMORY_HOST);
      B_ext_row_map = hypre_CTAlloc( HYPRE_BigInt, num_rows_B_ext+1 , HYPRE_MEMORY_HOST);
      *pB_ext_row_map = B_ext_row_map;
   };

   /*--------------------------------------------------------------------------
    * generate B_int_i through adding number of row-elements of offd and diag
    * for corresponding rows. B_int_i[j+1] contains the number of elements of
    * a row j (which is determined through send_map_elmts)
    *--------------------------------------------------------------------------*/

   jdata_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
   jdata_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);
   jdata_send_map_starts[0] = B_int_i[0] = 0;

   /*HYPRE_Int prefix_sum_workspace[(hypre_NumThreads() + 1)*num_sends];*/
   prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, (hypre_NumThreads() + 1)*num_sends, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,k)
#endif
   {
      /* Counting pass: each thread counts the entries it will pack per send
       * target; a multi-sequence prefix sum then yields packing offsets. */
      /*HYPRE_Int counts[num_sends];*/
      HYPRE_Int *counts;
      counts = hypre_TAlloc(HYPRE_Int, num_sends, HYPRE_MEMORY_HOST);
      for (i=0; i < num_sends; i++)
      {
         HYPRE_Int j_begin, j_end;
         hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]);
         j_begin += send_map_starts[i];
         j_end += send_map_starts[i];

         HYPRE_Int count = 0;
         if (skip_fine && skip_same_sign)
         {
            /* Count only off-diagonal entries whose sign is opposite to the
             * diagonal entry (S-matrix style filtering) and whose columns
             * are coarse (CF_marker >= 0) or owned by the receiving rank. */
#ifndef HYPRE_NO_GLOBAL_PARTITION
            HYPRE_Int send_proc = send_procs[i];
            HYPRE_BigInt send_proc_first_row = row_starts[send_proc];
            HYPRE_BigInt send_proc_last_row = row_starts[send_proc + 1];
#endif
            for (j = j_begin; j < j_end; j++)
            {
               HYPRE_Int jrow = send_map_elmts[j];
               HYPRE_Int len = 0;

               if (diag_data[diag_i[jrow]] >= 0)
               {
                  for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
                  {
                     if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0) len++;
                  }
                  for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                  {
#ifdef HYPRE_NO_GLOBAL_PARTITION
                     if (offd_data[k] < 0) len++;
#else
                     HYPRE_Int c = offd_j[k];
                     HYPRE_BigInt c_global = col_map_offd[c];
                     if (offd_data[k] < 0 && (CF_marker_offd[c] >= 0 || (c_global >= send_proc_first_row && c_global < send_proc_last_row))) len++;
#endif
                  }
               }
               else
               {
                  for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
                  {
                     if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0) len++;
                  }
                  for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                  {
#ifdef HYPRE_NO_GLOBAL_PARTITION
                     if (offd_data[k] > 0) len++;
#else
                     HYPRE_Int c = offd_j[k];
                     HYPRE_BigInt c_global = col_map_offd[c];
                     if (offd_data[k] > 0 && (CF_marker_offd[c] >= 0 || (c_global >= send_proc_first_row && c_global < send_proc_last_row))) len++;
#endif
                  }
               }
               B_int_i[j + 1] = len;
               count += len;
            }
         }
         else if (skip_fine)
         {
            /* Count only entries whose columns are coarse points. */
            for (j = j_begin; j < j_end; j++)
            {
               HYPRE_Int jrow = send_map_elmts[j];
               HYPRE_Int len = 0;
               for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++)
               {
                  if (CF_marker[diag_j[k]] >= 0) len++;
               }
               for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
               {
                  if (CF_marker_offd[offd_j[k]] >= 0) len++;
               }
               B_int_i[j + 1] = len;
               count += len;
            }
         }
         else
         {
            /* No filtering: full row length (diag + offd). */
            for (j = j_begin; j < j_end; j++)
            {
               HYPRE_Int jrow = send_map_elmts[j];
               HYPRE_Int len = diag_i[jrow + 1] - diag_i[jrow];
               len += offd_i[jrow + 1] - offd_i[jrow];
               B_int_i[j + 1] = len;
               count += len;
            }
         }
         if (find_row_map)
         {
            /* Record the global row index of each row being sent. */
            for (j = j_begin; j < j_end; j++)
            {
               HYPRE_Int jrow = send_map_elmts[j];
               B_int_row_map[j] = (HYPRE_BigInt)jrow + first_row_index;
            }
         }
         counts[i] = count;
      }

      /* Thread-cooperative prefix sums, one sequence per send target;
       * leaves this thread's exclusive offsets in counts[]. */
      hypre_prefix_sum_multiple(counts, jdata_send_map_starts + 1, num_sends, prefix_sum_workspace);

#ifdef HYPRE_USING_OPENMP
#pragma omp master
#endif
      {
         for (i = 1; i < num_sends; i++)
         {
            jdata_send_map_starts[i + 1] += jdata_send_map_starts[i];
         }

         /*--------------------------------------------------------------------------
          * initialize communication: exchange row lengths now so the i-array
          * transfer overlaps with the packing of j/data below.
          *--------------------------------------------------------------------------*/
         comm_handle = hypre_ParCSRCommHandleCreate(11,comm_pkg, &B_int_i[1],&(B_ext_i[1]) );
         if ( find_row_map )
         {
            /* scatter/gather B_int row numbers to form array of B_ext row numbers */
            row_map_comm_handle = hypre_ParCSRCommHandleCreate(21,comm_pkg, B_int_row_map, B_ext_row_map );
         }

         B_int_j = hypre_TAlloc(HYPRE_BigInt, jdata_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
         if (data) B_int_data = hypre_TAlloc(HYPRE_Complex, jdata_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier  /* all threads wait for master's allocations */
#endif

      /* Packing pass: same filtering logic as the counting pass, now copying
       * column ids (and values when data != 0) into the send buffers. */
      for (i=0; i < num_sends; i++)
      {
         HYPRE_Int j_begin, j_end;
         hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]);
         j_begin += send_map_starts[i];
         j_end += send_map_starts[i];

         HYPRE_Int count = counts[i] + jdata_send_map_starts[i];

         if (data)
         {
            if (skip_same_sign && skip_fine)
            {
#ifndef HYPRE_NO_GLOBAL_PARTITION
               HYPRE_Int send_proc = send_procs[i];
               HYPRE_BigInt send_proc_first_row = row_starts[send_proc];
               HYPRE_BigInt send_proc_last_row = row_starts[send_proc + 1];
#endif
               for (j = j_begin; j < j_end; j++)
               {
                  HYPRE_Int jrow = send_map_elmts[j];
                  /*HYPRE_Int count_begin = count;*/
                  if (diag_data[diag_i[jrow]] >= 0)
                  {
                     for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
                     {
                        if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0)
                        {
                           B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag;
                           B_int_data[count] = diag_data[k];
                           count++;
                        }
                     }
                     for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                     {
                        HYPRE_Int c = offd_j[k];
                        HYPRE_BigInt c_global = col_map_offd[c];
#ifdef HYPRE_NO_GLOBAL_PARTITION
                        if (offd_data[k] < 0)
#else
                        if (offd_data[k] < 0 && (CF_marker_offd[c] >= 0 || (c_global >= send_proc_first_row && c_global < send_proc_last_row)))
#endif
                        {
                           B_int_j[count] = c_global;
                           B_int_data[count] = offd_data[k];
                           count++;
                        }
                     }
                  }
                  else
                  {
                     for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
                     {
                        if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0)
                        {
                           B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag;
                           B_int_data[count] = diag_data[k];
                           count++;
                        }
                     }
                     for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                     {
                        HYPRE_Int c = offd_j[k];
                        HYPRE_BigInt c_global = col_map_offd[c];
#ifdef HYPRE_NO_GLOBAL_PARTITION
                        if (offd_data[k] > 0)
#else
                        if (offd_data[k] > 0 && (CF_marker_offd[c] >= 0 || (c_global >= send_proc_first_row && c_global < send_proc_last_row)))
#endif
                        {
                           B_int_j[count] = c_global;
                           B_int_data[count] = offd_data[k];
                           count++;
                        }
                     }
                  }
               }
            }
            else
            {
               for (j = j_begin; j < j_end; ++j)
               {
                  HYPRE_Int jrow = send_map_elmts[j];
                  for (k=diag_i[jrow]; k < diag_i[jrow+1]; k++)
                  {
                     B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag;
                     B_int_data[count] = diag_data[k];
                     count++;
                  }
                  for (k=offd_i[jrow]; k < offd_i[jrow+1]; k++)
                  {
                     B_int_j[count] = col_map_offd[offd_j[k]];
                     B_int_data[count] = offd_data[k];
                     count++;
                  }
               }
            }
         } // data
         else
         {
            if (skip_fine)
            {
               for (j = j_begin; j < j_end; j++)
               {
                  HYPRE_Int jrow = send_map_elmts[j];
                  for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++)
                  {
                     if (CF_marker[diag_j[k]] >= 0)
                     {
                        B_int_j[count] = (HYPRE_BigInt)diag_j[k] + first_col_diag;
                        count++;
                     }
                  }
                  for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                  {
                     if (CF_marker_offd[offd_j[k]] >= 0)
                     {
                        B_int_j[count] = col_map_offd[offd_j[k]];
                        count++;
                     }
                  }
               }
            }
            else
            {
               for (j = j_begin; j < j_end; ++j)
               {
                  HYPRE_Int jrow = send_map_elmts[j];
                  for (k=diag_i[jrow]; k < diag_i[jrow+1]; k++)
                  {
                     B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag;
                     count++;
                  }
                  for (k=offd_i[jrow]; k < offd_i[jrow+1]; k++)
                  {
                     B_int_j[count] = col_map_offd[offd_j[k]];
                     count++;
                  }
               }
            }
         } // !data
      } /* for each send target */
      hypre_TFree(counts, HYPRE_MEMORY_HOST);
   } /* omp parallel. JSP: this takes most of time in this function */
   hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);

   /* Build a temporary CommPkg whose send/recv starts describe the packed
    * j/data buffers (row-length starts differ from entry-count starts). */
   tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm;
   hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
   hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
   hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgSendProcs(comm_pkg);
   hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
   hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = jdata_send_map_starts;

   /* Complete the row-length exchange before prefix-summing B_ext_i. */
   hypre_ParCSRCommHandleDestroy(comm_handle);
   comm_handle = NULL;

   /*--------------------------------------------------------------------------
    * after communication exchange B_ext_i[j+1] contains the number of elements
    * of a row j !
 * evaluate B_ext_i and compute *num_nonzeros for B_ext
    *--------------------------------------------------------------------------*/

   /* Prefix-sum the received row lengths into a proper CSR row pointer. */
   for (i=0; i < num_recvs; i++)
      for (j = recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++)
         B_ext_i[j+1] += B_ext_i[j];

   *num_nonzeros = B_ext_i[num_rows_B_ext];

   *pB_ext_j = hypre_TAlloc(HYPRE_BigInt, *num_nonzeros, HYPRE_MEMORY_HOST);
   B_ext_j = *pB_ext_j;
   if (data)
   {
      *pB_ext_data = hypre_TAlloc(HYPRE_Complex, *num_nonzeros, HYPRE_MEMORY_HOST);
      B_ext_data = *pB_ext_data;
   };

   /* Per-neighbor receive offsets in entry (not row) units. */
   for (i=0; i < num_recvs; i++)
   {
      start_index = B_ext_i[recv_vec_starts[i]];
      *num_nonzeros = B_ext_i[recv_vec_starts[i+1]]-start_index;
      jdata_recv_vec_starts[i+1] = B_ext_i[recv_vec_starts[i+1]];
   }

   hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = jdata_recv_vec_starts;

   /* Launch the (overlappable) column-index and value exchanges; the caller
    * must destroy these handles before reading B_ext_j / B_ext_data. */
   *comm_handle_idx = hypre_ParCSRCommHandleCreate(21,tmp_comm_pkg,B_int_j,B_ext_j);
   if (data)
   {
      *comm_handle_data = hypre_ParCSRCommHandleCreate(1,tmp_comm_pkg,B_int_data, B_ext_data);
   }

   if (row_map_comm_handle)
   {
      hypre_ParCSRCommHandleDestroy(row_map_comm_handle);
      row_map_comm_handle = NULL;
   }

   hypre_TFree(jdata_send_map_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(jdata_recv_vec_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST);
   hypre_TFree(B_int_i, HYPRE_MEMORY_HOST);
   if ( find_row_map ) hypre_TFree(B_int_row_map, HYPRE_MEMORY_HOST);

   /* end generic part */
}

/* Non-overlapping convenience wrapper: performs the full extraction and
 * waits for both exchanges to finish (destroying the handles and freeing
 * the send buffers they own) before returning, so the output arrays are
 * immediately safe to read. */
void hypre_ParCSRMatrixExtractBExt_Arrays(
   HYPRE_Int ** pB_ext_i,
   HYPRE_BigInt ** pB_ext_j,
   HYPRE_Complex ** pB_ext_data,
   HYPRE_BigInt ** pB_ext_row_map,
   HYPRE_Int * num_nonzeros,
   HYPRE_Int data,
   HYPRE_Int find_row_map,
   MPI_Comm comm,
   hypre_ParCSRCommPkg * comm_pkg,
   HYPRE_Int num_cols_B,
   HYPRE_Int num_recvs,
   HYPRE_Int num_sends,
   HYPRE_BigInt first_col_diag,
   HYPRE_BigInt * row_starts,
   HYPRE_Int * recv_vec_starts,
   HYPRE_Int * send_map_starts,
   HYPRE_Int * send_map_elmts,
   HYPRE_Int * diag_i,
   HYPRE_Int * diag_j,
   HYPRE_Int * offd_i,
   HYPRE_Int * offd_j,
   HYPRE_BigInt * col_map_offd,
   HYPRE_Real * diag_data,
   HYPRE_Real * offd_data
)
{
   hypre_ParCSRCommHandle *comm_handle_idx, *comm_handle_data;

   /* No CF filtering: skip_fine = 0, skip_same_sign = 0. */
   hypre_ParCSRMatrixExtractBExt_Arrays_Overlap(
      pB_ext_i, pB_ext_j, pB_ext_data, pB_ext_row_map, num_nonzeros,
      data, find_row_map, comm, comm_pkg, num_cols_B, num_recvs, num_sends,
      first_col_diag, row_starts, recv_vec_starts, send_map_starts,
      send_map_elmts, diag_i, diag_j, offd_i, offd_j, col_map_offd,
      diag_data, offd_data,
      &comm_handle_idx, &comm_handle_data,
      NULL, NULL, 0, 0);

   /* Wait for the index exchange and release its send buffer. */
   HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_idx->send_data;
   hypre_ParCSRCommHandleDestroy(comm_handle_idx);
   hypre_TFree(send_idx, HYPRE_MEMORY_HOST);

   if (data)
   {
      /* Likewise for the value exchange. */
      HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_data->send_data;
      hypre_ParCSRCommHandleDestroy(comm_handle_data);
      hypre_TFree(send_data, HYPRE_MEMORY_HOST);
   }
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixExtractBExt : extracts rows from B which are located on
 * other processors and needed for multiplication with A locally. The rows
 * are returned as CSRMatrix.
 *--------------------------------------------------------------------------*/

/* Nonblocking ("overlap") variant: returns a CSRMatrix whose row pointer is
 * already valid, while the exchanges filling its (big) column indices and,
 * if data != 0, its values are still in flight on *comm_handle_idx /
 * *comm_handle_data.  The caller must destroy those handles (and free the
 * send buffers they reference) before reading BigJ/Data.  CF_marker /
 * CF_marker_offd with skip_fine / skip_same_sign optionally restrict which
 * entries are extracted (forwarded unchanged to the _Arrays_Overlap core). */
hypre_CSRMatrix * hypre_ParCSRMatrixExtractBExt_Overlap(
   hypre_ParCSRMatrix *B,
   hypre_ParCSRMatrix *A,
   HYPRE_Int data,
   hypre_ParCSRCommHandle **comm_handle_idx,
   hypre_ParCSRCommHandle **comm_handle_data,
   HYPRE_Int *CF_marker,
   HYPRE_Int *CF_marker_offd,
   HYPRE_Int skip_fine,
   HYPRE_Int skip_same_sign )
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(B);
   HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(B);
   /*HYPRE_Int first_row_index = hypre_ParCSRMatrixFirstRowIndex(B);*/
   HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(B);

   /* Communication pattern comes from A (rows of B needed locally are
      exactly A's off-diagonal columns). */
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int num_recvs;
   HYPRE_Int *recv_vec_starts;
   HYPRE_Int num_sends;
   HYPRE_Int *send_map_starts;
   HYPRE_Int *send_map_elmts;

   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(B);
   HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
   HYPRE_Int *diag_j = hypre_CSRMatrixJ(diag);
   HYPRE_Real *diag_data = hypre_CSRMatrixData(diag);

   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(B);
   HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_Int *offd_j = hypre_CSRMatrixJ(offd);
   HYPRE_Real *offd_data = hypre_CSRMatrixData(offd);

   HYPRE_Int num_cols_B, num_nonzeros;
   HYPRE_Int num_rows_B_ext;
   hypre_CSRMatrix *B_ext;

   HYPRE_Int *B_ext_i;
   HYPRE_BigInt *B_ext_j;
   HYPRE_Complex *B_ext_data;
   HYPRE_BigInt *idummy;  /* row map not requested (find_row_map = 0) */

   /*---------------------------------------------------------------------
    * If there exists no CommPkg for A, a CommPkg is generated using
    * equally load balanced partitionings
    *--------------------------------------------------------------------*/

   if (!hypre_ParCSRMatrixCommPkg(A))
   {
      hypre_MatvecCommPkgCreate(A);
   }

   comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
   send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);

   num_cols_B = hypre_ParCSRMatrixGlobalNumCols(B);
   num_rows_B_ext = recv_vec_starts[num_recvs];

   /* NOTE(review): B->row_starts is accessed directly here instead of via
      hypre_ParCSRMatrixRowStarts(B) used elsewhere in this file. */
   hypre_ParCSRMatrixExtractBExt_Arrays_Overlap
      ( &B_ext_i, &B_ext_j, &B_ext_data, &idummy,
        &num_nonzeros, data, 0, comm, comm_pkg,
        num_cols_B, num_recvs, num_sends,
        first_col_diag, B->row_starts, recv_vec_starts, send_map_starts,
        send_map_elmts, diag_i, diag_j, offd_i, offd_j, col_map_offd,
        diag_data, offd_data,
        comm_handle_idx, comm_handle_data,
        CF_marker, CF_marker_offd,
        skip_fine, skip_same_sign );

   B_ext = hypre_CSRMatrixCreate(num_rows_B_ext,num_cols_B,num_nonzeros);
   hypre_CSRMatrixMemoryLocation(B_ext) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixI(B_ext) = B_ext_i;
   hypre_CSRMatrixBigJ(B_ext) = B_ext_j;

   if (data) hypre_CSRMatrixData(B_ext) = B_ext_data;

   return B_ext;
}

/* Blocking extraction of the external rows of B needed by A.  The old
 * implementation (kept under #if 0) wrapped the _Overlap variant and waited;
 * the active path delegates to hypre_ParcsrGetExternalRowsInit/Wait. */
hypre_CSRMatrix * hypre_ParCSRMatrixExtractBExt( hypre_ParCSRMatrix *B,
                                                 hypre_ParCSRMatrix *A,
                                                 HYPRE_Int want_data )
{
#if 0
   hypre_ParCSRCommHandle *comm_handle_idx, *comm_handle_data;

   hypre_CSRMatrix *B_ext = hypre_ParCSRMatrixExtractBExt_Overlap(B, A, want_data, &comm_handle_idx, &comm_handle_data, NULL, NULL, 0, 0);

   HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_idx->send_data;
   hypre_ParCSRCommHandleDestroy(comm_handle_idx);
   hypre_TFree(send_idx, HYPRE_MEMORY_HOST);

   if (want_data)
   {
      HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_data->send_data;
      hypre_ParCSRCommHandleDestroy(comm_handle_data);
      hypre_TFree(send_data, HYPRE_MEMORY_HOST);
   }
#else
   /* diag and offd of B must live in the same memory space. */
   hypre_assert( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B)) ==
                 hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(B)) );

   hypre_CSRMatrix *B_ext;
   void *request;

   if (!hypre_ParCSRMatrixCommPkg(A))
   {
      hypre_MatvecCommPkgCreate(A);
   }

   hypre_ParcsrGetExternalRowsInit(B,
                                   hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)),
                                   hypre_ParCSRMatrixColMapOffd(A),
                                   hypre_ParCSRMatrixCommPkg(A),
                                   want_data,
                                   &request);

   B_ext = hypre_ParcsrGetExternalRowsWait(request);
#endif

   return B_ext;
}
/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixTranspose
 *
 * Computes AT = transpose(A) and returns it in *AT_ptr; values are copied
 * only when data != 0.  Returns ierr (always 0 here; errors are pushed via
 * hypre's error machinery inside the called routines).  The off-diagonal
 * part is transposed locally and its rows are exchanged with the owning
 * processes; that communication is overlapped with the local transpose of
 * the diagonal block.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixTranspose( hypre_ParCSRMatrix  *A,
                             hypre_ParCSRMatrix **AT_ptr,
                             HYPRE_Int            data )
{
   hypre_ParCSRCommHandle *comm_handle;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int num_cols = hypre_ParCSRMatrixNumCols(A);
   HYPRE_BigInt first_row_index = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A);

   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int ierr = 0;
   HYPRE_Int num_sends, num_recvs, num_cols_offd_AT;
   HYPRE_Int i, j, k, index, counter, j_row;
   HYPRE_BigInt value;

   hypre_ParCSRMatrix *AT;
   hypre_CSRMatrix *AT_diag;
   hypre_CSRMatrix *AT_offd;
   hypre_CSRMatrix *AT_tmp;    /* local transpose of A_offd */

   HYPRE_BigInt first_row_index_AT, first_col_diag_AT;
   HYPRE_Int local_num_rows_AT, local_num_cols_AT;

   HYPRE_Int *AT_tmp_i;
   HYPRE_Int *AT_tmp_j;
   HYPRE_BigInt *AT_big_j = NULL;   /* global column indices to send */
   HYPRE_Complex *AT_tmp_data;

   HYPRE_Int *AT_buf_i;             /* received row lengths */
   HYPRE_BigInt *AT_buf_j;          /* received global column indices */
   HYPRE_Complex *AT_buf_data;      /* received values */

   HYPRE_Int *AT_offd_i;
   HYPRE_Int *AT_offd_j;
   HYPRE_Complex *AT_offd_data;
   HYPRE_BigInt *col_map_offd_AT;
   HYPRE_BigInt *row_starts_AT;
   HYPRE_BigInt *col_starts_AT;

   HYPRE_Int num_procs, my_id;

   HYPRE_Int *recv_procs;
   HYPRE_Int *send_procs;
   HYPRE_Int *recv_vec_starts;
   HYPRE_Int *send_map_starts;
   HYPRE_Int *send_map_elmts;
   HYPRE_Int *tmp_recv_vec_starts;
   HYPRE_Int *tmp_send_map_starts;
   hypre_ParCSRCommPkg *tmp_comm_pkg;  /* reversed pattern for row exchange */

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);

   num_cols_offd_AT = 0;
   counter = 0;
   AT_offd_j = NULL;
   AT_offd_data = NULL;
   col_map_offd_AT = NULL;

   HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A);

   /*---------------------------------------------------------------------
    * If there exists no CommPkg for A, a CommPkg is generated using
    * equally load balanced partitionings
    *--------------------------------------------------------------------*/

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   if (num_procs > 1)
   {
      /* Transpose the off-diagonal block locally, convert its column
         indices to global row indices, and start sending the per-row
         counts to the owners (job 12; presumably the "reverse" direction
         of the comm pkg -- confirm in hypre_ParCSRCommHandleCreate). */
      hypre_CSRMatrixTranspose (A_offd, &AT_tmp, data);

      AT_tmp_i = hypre_CSRMatrixI(AT_tmp);
      AT_tmp_j = hypre_CSRMatrixJ(AT_tmp);
      if (data)
      {
         AT_tmp_data = hypre_CSRMatrixData(AT_tmp);
      }

      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
      recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
      send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
      recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
      send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
      send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);

      AT_buf_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends], HYPRE_MEMORY_HOST);

      if (AT_tmp_i[num_cols_offd])
      {
         AT_big_j = hypre_CTAlloc(HYPRE_BigInt, AT_tmp_i[num_cols_offd], HYPRE_MEMORY_HOST);
      }

      for (i=0; i < AT_tmp_i[num_cols_offd]; i++)
      {
         //AT_tmp_j[i] += first_row_index;
         AT_big_j[i] = (HYPRE_BigInt)AT_tmp_j[i]+first_row_index;
      }

      /* Compress the row pointer into row lengths for the exchange. */
      for (i=0; i < num_cols_offd; i++)
      {
         AT_tmp_i[i] = AT_tmp_i[i+1]-AT_tmp_i[i];
      }

      comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg, AT_tmp_i, AT_buf_i);
   }

   /* Overlap: transpose the diagonal block while the counts are in flight. */
   hypre_CSRMatrixTranspose(A_diag, &AT_diag, data);

   AT_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols+1, memory_location);

   if (num_procs > 1)
   {
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;

      /* Build a temporary comm pkg whose map starts are in units of matrix
         entries (variable row lengths) rather than rows. */
      tmp_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
      tmp_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);

      tmp_send_map_starts[0] = send_map_starts[0];
      for (i=0; i < num_sends; i++)
      {
         tmp_send_map_starts[i+1] = tmp_send_map_starts[i];
         for (j=send_map_starts[i]; j < send_map_starts[i+1]; j++)
         {
            tmp_send_map_starts[i+1] += AT_buf_i[j];
            AT_offd_i[send_map_elmts[j]+1] += AT_buf_i[j];
         }
      }
      for (i=0; i < num_cols; i++)
      {
         AT_offd_i[i+1] += AT_offd_i[i];
      }

      tmp_recv_vec_starts[0] = recv_vec_starts[0];
      for (i=0; i < num_recvs; i++)
      {
         tmp_recv_vec_starts[i+1] = tmp_recv_vec_starts[i];
         for (j=recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++)
         {
            tmp_recv_vec_starts[i+1] += AT_tmp_i[j];
         }
      }

      tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
      hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm;
      hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
      hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
      hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = recv_procs;
      hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = send_procs;
      hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = tmp_recv_vec_starts;
      hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = tmp_send_map_starts;

      /* Exchange global column indices (job 22) and, if requested,
         values (job 2). */
      AT_buf_j = hypre_CTAlloc(HYPRE_BigInt, tmp_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
      comm_handle = hypre_ParCSRCommHandleCreate(22, tmp_comm_pkg, AT_big_j, AT_buf_j);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
      hypre_TFree(AT_big_j, HYPRE_MEMORY_HOST);

      if (data)
      {
         AT_buf_data = hypre_CTAlloc(HYPRE_Complex, tmp_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
         comm_handle = hypre_ParCSRCommHandleCreate(2,tmp_comm_pkg,AT_tmp_data, AT_buf_data);
         hypre_ParCSRCommHandleDestroy(comm_handle);
         comm_handle = NULL;
      }

      hypre_TFree(tmp_recv_vec_starts, HYPRE_MEMORY_HOST);
      hypre_TFree(tmp_send_map_starts, HYPRE_MEMORY_HOST);
      hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST);
      hypre_CSRMatrixDestroy(AT_tmp);

      if (AT_offd_i[num_cols])
      {
         AT_offd_j = hypre_CTAlloc(HYPRE_Int, AT_offd_i[num_cols], memory_location);
         AT_big_j = hypre_CTAlloc(HYPRE_BigInt, AT_offd_i[num_cols], HYPRE_MEMORY_HOST);
         if (data)
         {
            AT_offd_data = hypre_CTAlloc(HYPRE_Complex, AT_offd_i[num_cols], memory_location);
         }
      }
      else
      {
         AT_offd_j = NULL;
         AT_offd_data = NULL;
      }

      /* Scatter the received entries into AT_offd rows; AT_offd_i is used
         as a running insertion cursor and is shifted back afterwards. */
      counter = 0;
      for (i=0; i < num_sends; i++)
      {
         for (j=send_map_starts[i]; j < send_map_starts[i+1]; j++)
         {
            j_row = send_map_elmts[j];
            index = AT_offd_i[j_row];
            for (k=0; k < AT_buf_i[j]; k++)
            {
               if (data)
               {
                  AT_offd_data[index] = AT_buf_data[counter];
               }
               AT_big_j[index++] = AT_buf_j[counter++];
            }
            AT_offd_i[j_row] = index;
         }
      }

      for (i=num_cols; i > 0; i--)
      {
         AT_offd_i[i] = AT_offd_i[i-1];
      }
      AT_offd_i[0] = 0;

      /* Sort + dedupe the received global columns to form col_map_offd_AT. */
      if (counter)
      {
         hypre_BigQsort0(AT_buf_j,0,counter-1);
         num_cols_offd_AT = 1;
         value = AT_buf_j[0];
         for (i=1; i < counter; i++)
         {
            if (value < AT_buf_j[i])
            {
               AT_buf_j[num_cols_offd_AT++] = AT_buf_j[i];
               value = AT_buf_j[i];
            }
         }
      }

      if (num_cols_offd_AT)
      {
         col_map_offd_AT = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_AT, HYPRE_MEMORY_HOST);
      }
      else
      {
         col_map_offd_AT = NULL;
      }

      for (i = 0; i < num_cols_offd_AT; i++)
      {
         col_map_offd_AT[i] = AT_buf_j[i];
      }
      hypre_TFree(AT_buf_i, HYPRE_MEMORY_HOST);
      hypre_TFree(AT_buf_j, HYPRE_MEMORY_HOST);
      if (data)
      {
         hypre_TFree(AT_buf_data, HYPRE_MEMORY_HOST);
      }

      /* Map global column indices to local positions in col_map_offd_AT. */
      for (i=0; i < counter; i++)
      {
         AT_offd_j[i] = hypre_BigBinarySearch(col_map_offd_AT,AT_big_j[i], num_cols_offd_AT);
      }
      hypre_TFree(AT_big_j, HYPRE_MEMORY_HOST);
   }

   AT_offd = hypre_CSRMatrixCreate(num_cols, num_cols_offd_AT, counter);
   hypre_CSRMatrixMemoryLocation(AT_offd) = memory_location;
   hypre_CSRMatrixI(AT_offd) = AT_offd_i;
   hypre_CSRMatrixJ(AT_offd) = AT_offd_j;
   hypre_CSRMatrixData(AT_offd) = AT_offd_data;

   /* Row/col partitions of AT are A's swapped. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   row_starts_AT = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
   for (i=0; i < 2; i++)
   {
      row_starts_AT[i] = col_starts[i];
   }

   if (row_starts != col_starts)
   {
      col_starts_AT = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
      for (i=0; i < 2; i++)
      {
         col_starts_AT[i] = row_starts[i];
      }
   }
   else
   {
      col_starts_AT = row_starts_AT;
   }
   first_row_index_AT = row_starts_AT[0];
   first_col_diag_AT = col_starts_AT[0];

   local_num_rows_AT = (HYPRE_Int)(row_starts_AT[1]-first_row_index_AT );
   local_num_cols_AT = (HYPRE_Int)(col_starts_AT[1]-first_col_diag_AT);
#else
   row_starts_AT = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
   for (i=0; i < num_procs+1; i++)
   {
      row_starts_AT[i] = col_starts[i];
   }

   if (row_starts != col_starts)
   {
      col_starts_AT = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
      for (i=0; i < num_procs+1; i++)
      {
         col_starts_AT[i] = row_starts[i];
      }
   }
   else
   {
      col_starts_AT = row_starts_AT;
   }

   first_row_index_AT = row_starts_AT[my_id];
   first_col_diag_AT = col_starts_AT[my_id];

   local_num_rows_AT = (HYPRE_Int)(row_starts_AT[my_id+1]-first_row_index_AT );
   local_num_cols_AT = (HYPRE_Int)(col_starts_AT[my_id+1]-first_col_diag_AT);
#endif

   /* Assemble the ParCSR wrapper; AT owns everything except (possibly)
      shared col_starts when identical to row_starts. */
   AT = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixComm(AT) = comm;
   hypre_ParCSRMatrixDiag(AT) = AT_diag;
   hypre_ParCSRMatrixOffd(AT) = AT_offd;
   hypre_ParCSRMatrixGlobalNumRows(AT) = hypre_ParCSRMatrixGlobalNumCols(A);
   hypre_ParCSRMatrixGlobalNumCols(AT) = hypre_ParCSRMatrixGlobalNumRows(A);
   hypre_ParCSRMatrixRowStarts(AT) = row_starts_AT;
   hypre_ParCSRMatrixColStarts(AT) = col_starts_AT;
   hypre_ParCSRMatrixColMapOffd(AT) = col_map_offd_AT;

   hypre_ParCSRMatrixFirstRowIndex(AT) = first_row_index_AT;
   hypre_ParCSRMatrixFirstColDiag(AT) = first_col_diag_AT;

   hypre_ParCSRMatrixLastRowIndex(AT) = first_row_index_AT + local_num_rows_AT - 1;
   hypre_ParCSRMatrixLastColDiag(AT) = first_col_diag_AT + local_num_cols_AT - 1;

   hypre_ParCSRMatrixOwnsData(AT) = 1;
   hypre_ParCSRMatrixOwnsRowStarts(AT) = 1;
   hypre_ParCSRMatrixOwnsColStarts(AT) = 1;
   if (row_starts_AT == col_starts_AT)
   {
      hypre_ParCSRMatrixOwnsColStarts(AT) = 0;
   }
   hypre_ParCSRMatrixCommPkg(AT) = NULL;
   hypre_ParCSRMatrixCommPkgT(AT) = NULL;

   hypre_ParCSRMatrixRowindices(AT) = NULL;
   hypre_ParCSRMatrixRowvalues(AT) = NULL;
   hypre_ParCSRMatrixGetrowactive(AT) = 0;

   hypre_ParCSRMatrixOwnsAssumedPartition(AT) = 1;

   *AT_ptr = AT;

   return ierr;
}

/* -----------------------------------------------------------------------------
 * generate a parallel spanning tree (for Maxwell Equation)
 * G_csr is the node to edge connectivity matrix
 *
----------------------------------------------------------------------------- */ void hypre_ParCSRMatrixGenSpanningTree( hypre_ParCSRMatrix *G_csr, HYPRE_Int **indices, HYPRE_Int G_type ) { HYPRE_BigInt nrows_G, ncols_G; HYPRE_Int *G_diag_i, *G_diag_j, *GT_diag_mat, i, j, k, edge; HYPRE_Int *nodes_marked, *edges_marked, *queue, queue_tail, queue_head, node; HYPRE_Int mypid, nprocs, n_children, *children, nsends, *send_procs, *recv_cnts; HYPRE_Int nrecvs, *recv_procs, n_proc_array, *proc_array, *pgraph_i, *pgraph_j; HYPRE_Int parent, proc, proc2, node2, found, *t_indices, tree_size, *T_diag_i; HYPRE_Int *T_diag_j, *counts, offset; MPI_Comm comm; hypre_ParCSRCommPkg *comm_pkg; hypre_CSRMatrix *G_diag; /* fetch G matrix (G_type = 0 ==> node to edge) */ if (G_type == 0) { nrows_G = hypre_ParCSRMatrixGlobalNumRows(G_csr); ncols_G = hypre_ParCSRMatrixGlobalNumCols(G_csr); G_diag = hypre_ParCSRMatrixDiag(G_csr); G_diag_i = hypre_CSRMatrixI(G_diag); G_diag_j = hypre_CSRMatrixJ(G_diag); } else { nrows_G = hypre_ParCSRMatrixGlobalNumCols(G_csr); ncols_G = hypre_ParCSRMatrixGlobalNumRows(G_csr); G_diag = hypre_ParCSRMatrixDiag(G_csr); T_diag_i = hypre_CSRMatrixI(G_diag); T_diag_j = hypre_CSRMatrixJ(G_diag); counts = hypre_TAlloc(HYPRE_Int, nrows_G , HYPRE_MEMORY_HOST); for (i = 0; i < nrows_G; i++) counts[i] = 0; for (i = 0; i < T_diag_i[ncols_G]; i++) counts[T_diag_j[i]]++; G_diag_i = hypre_TAlloc(HYPRE_Int, (nrows_G+1) , HYPRE_MEMORY_HOST); G_diag_j = hypre_TAlloc(HYPRE_Int, T_diag_i[ncols_G] , HYPRE_MEMORY_HOST); G_diag_i[0] = 0; for (i = 1; i <= nrows_G; i++) G_diag_i[i] = G_diag_i[i-1] + counts[i-1]; for (i = 0; i < ncols_G; i++) { for (j = T_diag_i[i]; j < T_diag_i[i+1]; j++) { k = T_diag_j[j]; offset = G_diag_i[k]++; G_diag_j[offset] = i; } } G_diag_i[0] = 0; for (i = 1; i <= nrows_G; i++) G_diag_i[i] = G_diag_i[i-1] + counts[i-1]; free(counts); } /* form G transpose in special form (2 nodes per edge max) */ GT_diag_mat = hypre_TAlloc(HYPRE_Int, 2 * ncols_G , 
HYPRE_MEMORY_HOST); for (i = 0; i < 2 * ncols_G; i++) GT_diag_mat[i] = -1; for (i = 0; i < nrows_G; i++) { for (j = G_diag_i[i]; j < G_diag_i[i+1]; j++) { edge = G_diag_j[j]; if (GT_diag_mat[edge*2] == -1) GT_diag_mat[edge*2] = i; else GT_diag_mat[edge*2+1] = i; } } /* BFS on the local matrix graph to find tree */ nodes_marked = hypre_TAlloc(HYPRE_Int, nrows_G , HYPRE_MEMORY_HOST); edges_marked = hypre_TAlloc(HYPRE_Int, ncols_G , HYPRE_MEMORY_HOST); for (i = 0; i < nrows_G; i++) nodes_marked[i] = 0; for (i = 0; i < ncols_G; i++) edges_marked[i] = 0; queue = hypre_TAlloc(HYPRE_Int, nrows_G , HYPRE_MEMORY_HOST); queue_head = 0; queue_tail = 1; queue[0] = 0; nodes_marked[0] = 1; while ((queue_tail-queue_head) > 0) { node = queue[queue_tail-1]; queue_tail--; for (i = G_diag_i[node]; i < G_diag_i[node+1]; i++) { edge = G_diag_j[i]; if (edges_marked[edge] == 0) { if (GT_diag_mat[2*edge+1] != -1) { node2 = GT_diag_mat[2*edge]; if (node2 == node) node2 = GT_diag_mat[2*edge+1]; if (nodes_marked[node2] == 0) { nodes_marked[node2] = 1; edges_marked[edge] = 1; queue[queue_tail] = node2; queue_tail++; } } } } } free(nodes_marked); free(queue); free(GT_diag_mat); /* fetch the communication information from */ comm = hypre_ParCSRMatrixComm(G_csr); hypre_MPI_Comm_rank(comm, &mypid); hypre_MPI_Comm_size(comm, &nprocs); comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr); if (nprocs == 1 && comm_pkg == NULL) { hypre_MatvecCommPkgCreate((hypre_ParCSRMatrix *) G_csr); comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr); } /* construct processor graph based on node-edge connection */ /* (local edges connected to neighbor processor nodes) */ n_children = 0; nrecvs = nsends = 0; if (nprocs > 1) { nsends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); nrecvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); proc_array = NULL; if ((nsends+nrecvs) > 0) { n_proc_array = 0; proc_array = hypre_TAlloc(HYPRE_Int, 
(nsends+nrecvs) , HYPRE_MEMORY_HOST); for (i = 0; i < nsends; i++) proc_array[i] = send_procs[i]; for (i = 0; i < nrecvs; i++) proc_array[nsends+i] = recv_procs[i]; hypre_qsort0(proc_array, 0, nsends+nrecvs-1); n_proc_array = 1; for (i = 1; i < nrecvs+nsends; i++) if (proc_array[i] != proc_array[n_proc_array]) proc_array[n_proc_array++] = proc_array[i]; } pgraph_i = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); recv_cnts = hypre_TAlloc(HYPRE_Int, nprocs , HYPRE_MEMORY_HOST); hypre_MPI_Allgather(&n_proc_array, 1, HYPRE_MPI_INT, recv_cnts, 1, HYPRE_MPI_INT, comm); pgraph_i[0] = 0; for (i = 1; i <= nprocs; i++) pgraph_i[i] = pgraph_i[i-1] + recv_cnts[i-1]; pgraph_j = hypre_TAlloc(HYPRE_Int, pgraph_i[nprocs] , HYPRE_MEMORY_HOST); hypre_MPI_Allgatherv(proc_array, n_proc_array, HYPRE_MPI_INT, pgraph_j, recv_cnts, pgraph_i, HYPRE_MPI_INT, comm); free(recv_cnts); /* BFS on the processor graph to determine parent and children */ nodes_marked = hypre_TAlloc(HYPRE_Int, nprocs , HYPRE_MEMORY_HOST); for (i = 0; i < nprocs; i++) nodes_marked[i] = -1; queue = hypre_TAlloc(HYPRE_Int, nprocs , HYPRE_MEMORY_HOST); queue_head = 0; queue_tail = 1; node = 0; queue[0] = node; while ((queue_tail-queue_head) > 0) { proc = queue[queue_tail-1]; queue_tail--; for (i = pgraph_i[proc]; i < pgraph_i[proc+1]; i++) { proc2 = pgraph_j[i]; if (nodes_marked[proc2] < 0) { nodes_marked[proc2] = proc; queue[queue_tail] = proc2; queue_tail++; } } } parent = nodes_marked[mypid]; n_children = 0; for (i = 0; i < nprocs; i++) if (nodes_marked[i] == mypid) n_children++; if (n_children == 0) {n_children = 0; children = NULL;} else { children = hypre_TAlloc(HYPRE_Int, n_children , HYPRE_MEMORY_HOST); n_children = 0; for (i = 0; i < nprocs; i++) if (nodes_marked[i] == mypid) children[n_children++] = i; } free(nodes_marked); free(queue); free(pgraph_i); free(pgraph_j); } /* first, connection with my parent : if the edge in my parent * * is incident to one of my nodes, then my parent will mark it */ 
found = 0; for (i = 0; i < nrecvs; i++) { proc = hypre_ParCSRCommPkgRecvProc(comm_pkg, i); if (proc == parent) { found = 1; break; } } /* but if all the edges connected to my parent are on my side, * * then I will just pick one of them as tree edge */ if (found == 0) { for (i = 0; i < nsends; i++) { proc = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (proc == parent) { k = hypre_ParCSRCommPkgSendMapStart(comm_pkg,i); edge = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,k); edges_marked[edge] = 1; break; } } } /* next, if my processor has an edge incident on one node in my * * child, put this edge on the tree. But if there is no such * * edge, then I will assume my child will pick up an edge */ for (j = 0; j < n_children; j++) { proc = children[j]; for (i = 0; i < nsends; i++) { proc2 = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (proc == proc2) { k = hypre_ParCSRCommPkgSendMapStart(comm_pkg,i); edge = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,k); edges_marked[edge] = 1; break; } } } if (n_children > 0) free(children); /* count the size of the tree */ tree_size = 0; for (i = 0; i < ncols_G; i++) if (edges_marked[i] == 1) tree_size++; t_indices = hypre_TAlloc(HYPRE_Int, (tree_size+1) , HYPRE_MEMORY_HOST); t_indices[0] = tree_size; tree_size = 1; for (i = 0; i < ncols_G; i++) if (edges_marked[i] == 1) t_indices[tree_size++] = i; (*indices) = t_indices; free(edges_marked); if (G_type != 0) { free(G_diag_i); free(G_diag_j); } } /* ----------------------------------------------------------------------------- * extract submatrices based on given indices * ----------------------------------------------------------------------------- */ void hypre_ParCSRMatrixExtractSubmatrices( hypre_ParCSRMatrix *A_csr, HYPRE_Int *indices2, hypre_ParCSRMatrix ***submatrices ) { HYPRE_Int nrows_A, nindices, *indices, *A_diag_i, *A_diag_j, mypid, nprocs; HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *exp_indices; HYPRE_BigInt *itmp_array; HYPRE_Int nnz11, nnz12, nnz21, nnz22, col, ncols_offd, 
nnz_offd, nnz_diag; HYPRE_Int nrows, nnz; HYPRE_BigInt global_nrows, global_ncols, *row_starts, *col_starts; HYPRE_Int *diag_i, *diag_j, row, *offd_i; HYPRE_Complex *A_diag_a, *diag_a; hypre_ParCSRMatrix *A11_csr, *A12_csr, *A21_csr, *A22_csr; hypre_CSRMatrix *A_diag, *diag, *offd; MPI_Comm comm; /* ----------------------------------------------------- * first make sure the incoming indices are in order * ----------------------------------------------------- */ nindices = indices2[0]; indices = &(indices2[1]); hypre_qsort0(indices, 0, nindices-1); /* ----------------------------------------------------- * fetch matrix information * ----------------------------------------------------- */ nrows_A = (HYPRE_Int) hypre_ParCSRMatrixGlobalNumRows(A_csr); A_diag = hypre_ParCSRMatrixDiag(A_csr); A_diag_i = hypre_CSRMatrixI(A_diag); A_diag_j = hypre_CSRMatrixJ(A_diag); A_diag_a = hypre_CSRMatrixData(A_diag); comm = hypre_ParCSRMatrixComm(A_csr); hypre_MPI_Comm_rank(comm, &mypid); hypre_MPI_Comm_size(comm, &nprocs); if (nprocs > 1) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractSubmatrices: cannot handle nprocs > 1 yet.\n"); exit(1); } /* ----------------------------------------------------- * compute new matrix dimensions * ----------------------------------------------------- */ proc_offsets1 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); proc_offsets2 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1, HYPRE_MPI_INT, comm); k = 0; for (i = 0; i < nprocs; i++) { j = proc_offsets1[i]; proc_offsets1[i] = k; k += j; } proc_offsets1[nprocs] = k; itmp_array = hypre_ParCSRMatrixRowStarts(A_csr); for (i = 0; i <= nprocs; i++) proc_offsets2[i] = itmp_array[i] - proc_offsets1[i]; /* ----------------------------------------------------- * assign id's to row and col for later processing * ----------------------------------------------------- */ exp_indices = hypre_TAlloc(HYPRE_Int, nrows_A , 
HYPRE_MEMORY_HOST); for (i = 0; i < nrows_A; i++) exp_indices[i] = -1; for (i = 0; i < nindices; i++) { if (exp_indices[indices[i]] == -1) exp_indices[indices[i]] = i; else { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractSubmatrices: wrong index %d %d\n"); exit(1); } } k = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { exp_indices[i] = - k - 1; k++; } } /* ----------------------------------------------------- * compute number of nonzeros for each block * ----------------------------------------------------- */ nnz11 = nnz12 = nnz21 = nnz22 = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) nnz11++; else nnz12++; } } else { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) nnz21++; else nnz22++; } } } /* ----------------------------------------------------- * create A11 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz11; #ifdef HYPRE_NO_GLOBAL_PARTITION /* This case is not yet implemented! 
*/ global_nrows = 0; global_ncols = 0; row_starts = NULL; col_starts = NULL; #else global_nrows = proc_offsets1[nprocs]; global_ncols = proc_offsets1[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = proc_offsets1[i]; col_starts[i] = proc_offsets1[i]; } #endif A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) { diag_j[nnz] = exp_indices[col]; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A11_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A11_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * create A12 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz12; global_nrows = (HYPRE_BigInt)proc_offsets1[nprocs]; global_ncols = (HYPRE_BigInt)proc_offsets2[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets1[i]; col_starts[i] = 
(HYPRE_BigInt)proc_offsets2[i]; } A12_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] < 0) { diag_j[nnz] = - exp_indices[col] - 1; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } if (nnz > nnz_diag) { hypre_assert(0); hypre_error(HYPRE_ERROR_GENERIC); } diag = hypre_ParCSRMatrixDiag(A12_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A12_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * create A21 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz21; global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs]; global_ncols = (HYPRE_BigInt)proc_offsets1[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; col_starts[i] = (HYPRE_BigInt)proc_offsets1[i]; } A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nrows_A - nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); 
diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) { diag_j[nnz] = exp_indices[col]; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A21_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A21_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * create A22 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz22; global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs]; global_ncols = (HYPRE_BigInt)proc_offsets2[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; col_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; } A22_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nrows_A - nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] < 0) { diag_j[nnz] = - exp_indices[col] - 1; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = 
hypre_ParCSRMatrixDiag(A22_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A22_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * hand the matrices back to the caller and clean up * ----------------------------------------------------- */ (*submatrices)[0] = A11_csr; (*submatrices)[1] = A12_csr; (*submatrices)[2] = A21_csr; (*submatrices)[3] = A22_csr; free(proc_offsets1); free(proc_offsets2); free(exp_indices); } /* ----------------------------------------------------------------------------- * extract submatrices of a rectangular matrix * ----------------------------------------------------------------------------- */ void hypre_ParCSRMatrixExtractRowSubmatrices( hypre_ParCSRMatrix *A_csr, HYPRE_Int *indices2, hypre_ParCSRMatrix ***submatrices ) { HYPRE_Int nrows_A, nindices, *indices, *A_diag_i, *A_diag_j, mypid, nprocs; HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *exp_indices; HYPRE_Int nnz11, nnz21, col, ncols_offd, nnz_offd, nnz_diag; HYPRE_Int *A_offd_i, *A_offd_j; HYPRE_Int nrows, nnz; HYPRE_BigInt global_nrows, global_ncols, *row_starts, *col_starts, *itmp_array; HYPRE_Int *diag_i, *diag_j, row, *offd_i, *offd_j, nnz11_offd, nnz21_offd; HYPRE_Complex *A_diag_a, *diag_a, *offd_a; hypre_ParCSRMatrix *A11_csr, *A21_csr; hypre_CSRMatrix *A_diag, *diag, *A_offd, *offd; MPI_Comm comm; /* ----------------------------------------------------- * first make sure the incoming indices are in order * ----------------------------------------------------- */ nindices = indices2[0]; indices = &(indices2[1]); hypre_qsort0(indices, 0, nindices-1); /* ----------------------------------------------------- * fetch matrix information * 
----------------------------------------------------- */ nrows_A = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumRows(A_csr); A_diag = hypre_ParCSRMatrixDiag(A_csr); A_diag_i = hypre_CSRMatrixI(A_diag); A_diag_j = hypre_CSRMatrixJ(A_diag); A_diag_a = hypre_CSRMatrixData(A_diag); A_offd = hypre_ParCSRMatrixOffd(A_csr); A_offd_i = hypre_CSRMatrixI(A_offd); A_offd_j = hypre_CSRMatrixJ(A_offd); comm = hypre_ParCSRMatrixComm(A_csr); hypre_MPI_Comm_rank(comm, &mypid); hypre_MPI_Comm_size(comm, &nprocs); /* ----------------------------------------------------- * compute new matrix dimensions * ----------------------------------------------------- */ proc_offsets1 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); proc_offsets2 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1, HYPRE_MPI_INT, comm); k = 0; for (i = 0; i < nprocs; i++) { j = proc_offsets1[i]; proc_offsets1[i] = k; k += j; } proc_offsets1[nprocs] = k; itmp_array = hypre_ParCSRMatrixRowStarts(A_csr); for (i = 0; i <= nprocs; i++) proc_offsets2[i] = (HYPRE_Int)(itmp_array[i] - proc_offsets1[i]); /* ----------------------------------------------------- * assign id's to row and col for later processing * ----------------------------------------------------- */ exp_indices = hypre_TAlloc(HYPRE_Int, nrows_A , HYPRE_MEMORY_HOST); for (i = 0; i < nrows_A; i++) exp_indices[i] = -1; for (i = 0; i < nindices; i++) { if (exp_indices[indices[i]] == -1) exp_indices[indices[i]] = i; else { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractRowSubmatrices: wrong index %d %d\n"); exit(1); } } k = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { exp_indices[i] = - k - 1; k++; } } /* ----------------------------------------------------- * compute number of nonzeros for each block * ----------------------------------------------------- */ nnz11 = nnz21 = nnz11_offd = nnz21_offd = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j 
= A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) nnz11++; } nnz11_offd += A_offd_i[i+1] - A_offd_i[i]; } else { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] < 0) nnz21++; } nnz21_offd += A_offd_i[i+1] - A_offd_i[i]; } } /* ----------------------------------------------------- * create A11 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A_csr)); nnz_diag = nnz11; nnz_offd = nnz11_offd; global_nrows = (HYPRE_BigInt)proc_offsets1[nprocs]; itmp_array = hypre_ParCSRMatrixColStarts(A_csr); global_ncols = itmp_array[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets1[i]; col_starts[i] = itmp_array[i]; } A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) { diag_j[nnz] = exp_indices[col]; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A11_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd, HYPRE_MEMORY_HOST); offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd, HYPRE_MEMORY_HOST); nnz = 0; row = 0; offd_i[0] = 0; for (i = 0; i < nrows_A; i++) { if 
(exp_indices[i] >= 0) { for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { offd_j[nnz] = A_offd_j[j]; offd_a[nnz++] = A_diag_a[j]; } row++; offd_i[row] = nnz; } } offd = hypre_ParCSRMatrixOffd(A11_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = offd_j; hypre_CSRMatrixData(offd) = offd_a; /* ----------------------------------------------------- * create A21 matrix * ----------------------------------------------------- */ ncols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A_csr)); nnz_offd = nnz21_offd; nnz_diag = nnz21; global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs]; itmp_array = hypre_ParCSRMatrixColStarts(A_csr); global_ncols = itmp_array[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; col_starts[i] = itmp_array[i]; } A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nrows_A - nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { diag_j[nnz] = A_diag_j[j]; diag_a[nnz++] = A_diag_a[j]; } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A21_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd, HYPRE_MEMORY_HOST); offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd, HYPRE_MEMORY_HOST); nnz = 0; row = 0; offd_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { 
offd_j[nnz] = A_offd_j[j]; offd_a[nnz++] = A_diag_a[j]; } row++; offd_i[row] = nnz; } } offd = hypre_ParCSRMatrixOffd(A21_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = offd_j; hypre_CSRMatrixData(offd) = offd_a; /* ----------------------------------------------------- * hand the matrices back to the caller and clean up * ----------------------------------------------------- */ (*submatrices)[0] = A11_csr; (*submatrices)[1] = A21_csr; free(proc_offsets1); free(proc_offsets2); free(exp_indices); } /* ----------------------------------------------------------------------------- * return the sum of all local elements of the matrix * ----------------------------------------------------------------------------- */ HYPRE_Complex hypre_ParCSRMatrixLocalSumElts( hypre_ParCSRMatrix * A ) { hypre_CSRMatrix * A_diag = hypre_ParCSRMatrixDiag( A ); hypre_CSRMatrix * A_offd = hypre_ParCSRMatrixOffd( A ); return hypre_CSRMatrixSumElts(A_diag) + hypre_CSRMatrixSumElts(A_offd); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatAminvDB * computes C = (A - inv(D)B) where D is a diagonal matrix * Note: Data structure of A is expected to be a subset of data structure of B! 
*--------------------------------------------------------------------------*/

/* Computes C = A - inv(D)*B into a fresh clone of B.
 * d holds the diagonal of D (one entry per local row).
 * Per the header note above, A's sparsity pattern must be contained in B's;
 * the marker logic below relies on that (see NOTE(review) comments). */
HYPRE_Int hypre_ParCSRMatrixAminvDB( hypre_ParCSRMatrix *A,
                                     hypre_ParCSRMatrix *B,
                                     HYPRE_Complex *d,
                                     hypre_ParCSRMatrix **C_ptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(B);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   hypre_ParCSRMatrix *C = NULL;
   HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);

   hypre_ParCSRCommPkg *comm_pkg_B = hypre_ParCSRMatrixCommPkg(B);
   hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
   hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
   HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);
   HYPRE_Int num_sends_B, num_recvs_B;
   HYPRE_Int i, j, cnt;

   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);

   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(B_diag);
   HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag);
   HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag);
   HYPRE_Complex *B_diag_data = hypre_CSRMatrixData(B_diag);
   HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd);
   HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd);
   HYPRE_Complex *B_offd_data = hypre_CSRMatrixData(B_offd);
   HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);

   hypre_CSRMatrix *C_diag = NULL;
   hypre_CSRMatrix *C_offd = NULL;
   HYPRE_Int *C_diag_i = NULL;
   HYPRE_Int *C_diag_j = NULL;
   HYPRE_Complex *C_diag_data = NULL;
   HYPRE_Int *C_offd_i = NULL;
   HYPRE_Int *C_offd_j = NULL;
   HYPRE_Complex *C_offd_data = NULL;

   HYPRE_Int num_procs, my_id;

   HYPRE_Int *recv_procs_B;
   HYPRE_Int *send_procs_B;
   HYPRE_Int *recv_vec_starts_B;
   HYPRE_Int *send_map_starts_B;
   HYPRE_Int *send_map_elmts_B;
   hypre_ParCSRCommPkg *comm_pkg_C;
   HYPRE_Int *recv_procs_C;
   HYPRE_Int *send_procs_C;
   HYPRE_Int *recv_vec_starts_C;
   HYPRE_Int *send_map_starts_C;
   HYPRE_Int *send_map_elmts_C;
   HYPRE_Int *map_to_B;

   /*HYPRE_Int *C_diag_array;
   HYPRE_Int *C_offd_array;*/
   HYPRE_Complex *D_tmp;   /* per-row 1/d[i], precomputed once per thread range */
   HYPRE_Int size, rest, num_threads, ii;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();

   /*C_diag_array = hypre_CTAlloc(HYPRE_Int, num_threads);
   C_offd_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);*/

   /*---------------------------------------------------------------------
    * If there exists no CommPkg for B, a CommPkg is generated
    *--------------------------------------------------------------------*/

   if (!comm_pkg_B)
   {
      hypre_MatvecCommPkgCreate(B);
      comm_pkg_B = hypre_ParCSRMatrixCommPkg(B);
   }

   /* C starts as a structural clone of B (pattern only, values zero) */
   C = hypre_ParCSRMatrixClone(B, 0);
   /*hypre_ParCSRMatrixInitialize(C);*/

   C_diag = hypre_ParCSRMatrixDiag(C);
   C_diag_i = hypre_CSRMatrixI(C_diag);
   C_diag_j = hypre_CSRMatrixJ(C_diag);
   C_diag_data = hypre_CSRMatrixData(C_diag);
   C_offd = hypre_ParCSRMatrixOffd(C);
   C_offd_i = hypre_CSRMatrixI(C_offd);
   C_offd_j = hypre_CSRMatrixJ(C_offd);
   C_offd_data = hypre_CSRMatrixData(C_offd);

   /* static row partition across threads: first `rest` threads get one extra row */
   size = num_rows/num_threads;
   rest = num_rows - size*num_threads;

   D_tmp = hypre_CTAlloc(HYPRE_Complex, num_rows, HYPRE_MEMORY_HOST);

   if (num_cols_offd_A)
   {
      /* map A's offd columns into B's offd column numbering.
       * NOTE(review): the while-loop assumes both col maps are sorted and
       * every col_map_offd_A entry appears in col_map_offd_B (pattern-subset
       * assumption); otherwise cnt runs past the end -- confirm. */
      map_to_B = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST);
      cnt = 0;
      for (i=0; i < num_cols_offd_A; i++)
      {
         while (col_map_offd_B[cnt] < col_map_offd_A[i])
         {
            cnt++;
         }
         map_to_B[i] = cnt;
         cnt++;
      }
   }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii, i, j)
#endif
   for (ii=0; ii < num_threads; ii++)
   {
      HYPRE_Int *A_marker = NULL;
      HYPRE_Int ns, ne, A_col, num_cols, nmax;
      if (ii < rest)
      {
         ns = ii*size+ii;
         ne = (ii+1)*size+ii+1;
      }
      else
      {
         ns = ii*size+rest;
         ne = (ii+1)*size+rest;
      }
      /* one marker array reused for both the diag pass (indexed by diag
       * column) and the offd pass (indexed by offd column) */
      nmax = hypre_max(num_rows, num_cols_offd_B);
      A_marker = hypre_CTAlloc(HYPRE_Int, nmax, HYPRE_MEMORY_HOST);

      for (i=0; i < num_rows; i++)
         A_marker[i] = -1;

      for (i=ns; i < ne; i++)
         D_tmp[i] = 1.0/d[i];

      /* merge A's and -inv(D)B's diag entries into C's (cloned-from-B) diag.
       * A_marker[col] records where col was last written in C_diag_data;
       * a value >= C_diag_i[i] means "already seen in this row". */
      num_cols = C_diag_i[ns];
      for (i=ns; i < ne; i++)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            A_col = A_diag_j[j];
            if (A_marker[A_col] < C_diag_i[i])
            {
               A_marker[A_col] = num_cols;
               C_diag_j[num_cols] = A_col;
               C_diag_data[num_cols] = A_diag_data[j];
               num_cols++;
            }
            else
            {
               C_diag_data[A_marker[A_col]] += A_diag_data[j];
            }
         }
         for (j = B_diag_i[i]; j < B_diag_i[i+1]; j++)
         {
            A_col = B_diag_j[j];
            if (A_marker[A_col] < C_diag_i[i])
            {
               A_marker[A_col] = num_cols;
               C_diag_j[num_cols] = A_col;
               C_diag_data[num_cols] = -D_tmp[i]*B_diag_data[j];
               num_cols++;
            }
            else
            {
               C_diag_data[A_marker[A_col]] -= D_tmp[i]*B_diag_data[j];
            }
         }
      }

      /* reset the marker for the offd pass */
      for (i=0; i < num_cols_offd_B; i++)
         A_marker[i] = -1;

      /* same merge for the offd parts; A's offd columns are first translated
       * to B's offd numbering via map_to_B.
       * NOTE(review): the "seen" test compares against B_offd_i[i] (not
       * C_offd_i[i]) -- valid only because C is a clone of B; confirm. */
      num_cols = C_offd_i[ns];
      for (i=ns; i < ne; i++)
      {
         for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
         {
            A_col = map_to_B[A_offd_j[j]];
            if (A_marker[A_col] < B_offd_i[i])
            {
               A_marker[A_col] = num_cols;
               C_offd_j[num_cols] = A_col;
               C_offd_data[num_cols] = A_offd_data[j];
               num_cols++;
            }
            else
            {
               C_offd_data[A_marker[A_col]] += A_offd_data[j];
            }
         }
         for (j = B_offd_i[i]; j < B_offd_i[i+1]; j++)
         {
            A_col = B_offd_j[j];
            if (A_marker[A_col] < B_offd_i[i])
            {
               A_marker[A_col] = num_cols;
               C_offd_j[num_cols] = A_col;
               C_offd_data[num_cols] = -D_tmp[i]*B_offd_data[j];
               num_cols++;
            }
            else
            {
               C_offd_data[A_marker[A_col]] -= D_tmp[i]*B_offd_data[j];
            }
         }
      }
      hypre_TFree(A_marker, HYPRE_MEMORY_HOST);
   } /* end parallel region */

   /*for (i=0; i < num_cols_offd_B; i++)
      col_map_offd_C[i] = col_map_offd_B[i]; */

   /* C communicates exactly like B: deep-copy B's comm package */
   num_sends_B = hypre_ParCSRCommPkgNumSends(comm_pkg_B);
   num_recvs_B = hypre_ParCSRCommPkgNumRecvs(comm_pkg_B);
   recv_procs_B = hypre_ParCSRCommPkgRecvProcs(comm_pkg_B);
   recv_vec_starts_B = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_B);
   send_procs_B = hypre_ParCSRCommPkgSendProcs(comm_pkg_B);
   send_map_starts_B = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_B);
   send_map_elmts_B = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_B);

   recv_procs_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B, HYPRE_MEMORY_HOST);
   recv_vec_starts_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B+1, HYPRE_MEMORY_HOST);
   send_procs_C = hypre_CTAlloc(HYPRE_Int, num_sends_B, HYPRE_MEMORY_HOST);
   send_map_starts_C = hypre_CTAlloc(HYPRE_Int, num_sends_B+1, HYPRE_MEMORY_HOST);
   send_map_elmts_C = hypre_CTAlloc(HYPRE_Int, send_map_starts_B[num_sends_B], HYPRE_MEMORY_HOST);

   for (i=0; i < num_recvs_B; i++)
      recv_procs_C[i] = recv_procs_B[i];
   for (i=0; i < num_recvs_B+1; i++)
      recv_vec_starts_C[i] = recv_vec_starts_B[i];
   for (i=0; i < num_sends_B; i++)
      send_procs_C[i] = send_procs_B[i];
   for (i=0; i < num_sends_B+1; i++)
      send_map_starts_C[i] = send_map_starts_B[i];
   for (i=0; i < send_map_starts_B[num_sends_B]; i++)
      send_map_elmts_C[i] = send_map_elmts_B[i];

   comm_pkg_C = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm(comm_pkg_C) = comm;
   hypre_ParCSRCommPkgNumRecvs(comm_pkg_C) = num_recvs_B;
   hypre_ParCSRCommPkgRecvProcs(comm_pkg_C) = recv_procs_C;
   hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_C) = recv_vec_starts_C;
   hypre_ParCSRCommPkgNumSends(comm_pkg_C) = num_sends_B;
   hypre_ParCSRCommPkgSendProcs(comm_pkg_C) = send_procs_C;
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg_C) = send_map_starts_C;
   hypre_ParCSRCommPkgSendMapElmts(comm_pkg_C) = send_map_elmts_C;
   hypre_ParCSRMatrixCommPkg(C) = comm_pkg_C;

   hypre_TFree(D_tmp, HYPRE_MEMORY_HOST);
   if (num_cols_offd_A)
      hypre_TFree(map_to_B, HYPRE_MEMORY_HOST);

   *C_ptr = C;

   return (hypre_error_flag);
}

/*--------------------------------------------------------------------------
 * hypre_ParTMatmul : multiplies two ParCSRMatrices transpose(A) and B and returns
 * the product in ParCSRMatrix C
 * Note that C does not own the partitionings since its row_starts
 * is owned by A and col_starts by B.
*--------------------------------------------------------------------------*/

/* C = A^T * B.  Local products AT*B are formed first; contributions from
 * A's off-processor rows are exchanged (C_ext) and merged into the local
 * result with a two-pass (symbolic then numeric) OpenMP scheme. */
hypre_ParCSRMatrix *hypre_ParTMatmul( hypre_ParCSRMatrix  *A,
                                      hypre_ParCSRMatrix  *B)
{
   MPI_Comm        comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg_A = hypre_ParCSRMatrixCommPkg(A);

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *AT_diag = NULL;

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   hypre_CSRMatrix *AT_offd = NULL;

   HYPRE_Int    num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int    num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag);

   hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
   hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);

   HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);

   HYPRE_BigInt first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B);
   HYPRE_BigInt *col_starts_A = hypre_ParCSRMatrixColStarts(A);
   HYPRE_BigInt *col_starts_B = hypre_ParCSRMatrixColStarts(B);
   HYPRE_Int    num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag);
   HYPRE_Int    num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag);
   HYPRE_Int    num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);

   hypre_ParCSRMatrix *C;
   HYPRE_BigInt       *col_map_offd_C = NULL;
   HYPRE_Int          *map_B_to_C;

   hypre_CSRMatrix *C_diag = NULL;
   hypre_CSRMatrix *C_tmp_diag = NULL;

   HYPRE_Complex   *C_diag_data = NULL;
   HYPRE_Int       *C_diag_i = NULL;
   HYPRE_Int       *C_diag_j = NULL;
   HYPRE_BigInt    first_col_diag_C;
   HYPRE_BigInt    last_col_diag_C;

   hypre_CSRMatrix *C_offd = NULL;
   hypre_CSRMatrix *C_tmp_offd = NULL;
   hypre_CSRMatrix *C_int = NULL;
   hypre_CSRMatrix *C_ext = NULL;
   HYPRE_Int   *C_ext_i;
   HYPRE_BigInt   *C_ext_j;
   HYPRE_Complex   *C_ext_data;
   HYPRE_Int   *C_ext_diag_i;
   HYPRE_Int   *C_ext_diag_j;
   HYPRE_Complex   *C_ext_diag_data;
   HYPRE_Int   *C_ext_offd_i;
   HYPRE_Int   *C_ext_offd_j;
   HYPRE_Complex   *C_ext_offd_data;
   HYPRE_Int    C_ext_size = 0;
   HYPRE_Int    C_ext_diag_size = 0;
   HYPRE_Int    C_ext_offd_size = 0;

   HYPRE_Int   *C_tmp_diag_i;
   HYPRE_Int   *C_tmp_diag_j;
   HYPRE_Complex   *C_tmp_diag_data;
   HYPRE_Int   *C_tmp_offd_i;
   HYPRE_Int   *C_tmp_offd_j;
   HYPRE_Complex   *C_tmp_offd_data;

   HYPRE_Complex   *C_offd_data=NULL;
   HYPRE_Int       *C_offd_i=NULL;
   HYPRE_Int       *C_offd_j=NULL;

   HYPRE_BigInt    *temp;
   HYPRE_Int       *send_map_starts_A;
   HYPRE_Int       *send_map_elmts_A;
   HYPRE_Int        num_sends_A;

   HYPRE_Int        num_cols_offd_C = 0;
   HYPRE_Int       *P_marker;

   HYPRE_Int        i, j;
   HYPRE_Int        i1, j_indx;

   HYPRE_BigInt     n_rows_A, n_cols_A;
   HYPRE_BigInt     n_rows_B, n_cols_B;
   /*HYPRE_Int        allsquare = 0;*/
   HYPRE_Int        cnt, cnt_offd, cnt_diag;
   HYPRE_BigInt     value;
   HYPRE_Int        num_procs, my_id;
   HYPRE_Int        max_num_threads;
   HYPRE_Int       *C_diag_array = NULL;
   HYPRE_Int       *C_offd_array = NULL;

   HYPRE_BigInt first_row_index, first_col_diag;
   HYPRE_Int local_num_rows, local_num_cols;

   n_rows_A = hypre_ParCSRMatrixGlobalNumRows(A);
   n_cols_A = hypre_ParCSRMatrixGlobalNumCols(A);
   n_rows_B = hypre_ParCSRMatrixGlobalNumRows(B);
   n_cols_B = hypre_ParCSRMatrixGlobalNumCols(B);

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   max_num_threads = hypre_NumThreads();

   /* A^T * B requires matching row dimensions, globally and locally */
   if (n_rows_A != n_rows_B || num_rows_diag_A != num_rows_diag_B)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! Incompatible matrix dimensions!\n");
      return NULL;
   }

   HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A);
   HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B);

   /* RL: TODO cannot guarantee, maybe should never assert
   hypre_assert(memory_location_A == memory_location_B);
   */

   /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
    * not sure if this is the right thing to do.
    * Also, need something like this in other places
    * TODO */
   HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);

   /*if (num_cols_diag_A == num_cols_diag_B) allsquare = 1;*/

   hypre_CSRMatrixTranspose(A_diag, &AT_diag, 1);
   hypre_CSRMatrixTranspose(A_offd, &AT_offd, 1);

   /* purely local part of the product */
   C_tmp_diag = hypre_CSRMatrixMultiply(AT_diag, B_diag);
   C_ext_size = 0;

   if (num_procs > 1)
   {
      hypre_CSRMatrix *C_int_diag;
      hypre_CSRMatrix *C_int_offd;
      void            *request;

      C_tmp_offd = hypre_CSRMatrixMultiply(AT_diag, B_offd);
      C_int_diag = hypre_CSRMatrixMultiply(AT_offd, B_diag);
      C_int_offd = hypre_CSRMatrixMultiply(AT_offd, B_offd);
      /* temporarily hang the partial products on B so MergeDiagAndOffd can
       * build the rows to ship; B is restored immediately after */
      hypre_ParCSRMatrixDiag(B) = C_int_diag;
      hypre_ParCSRMatrixOffd(B) = C_int_offd;
      C_int = hypre_MergeDiagAndOffd(B);
      hypre_ParCSRMatrixDiag(B) = B_diag;
      hypre_ParCSRMatrixOffd(B) = B_offd;
      hypre_ExchangeExternalRowsInit(C_int, comm_pkg_A, &request);
      C_ext = hypre_ExchangeExternalRowsWait(request);
      C_ext_i = hypre_CSRMatrixI(C_ext);
      C_ext_j = hypre_CSRMatrixBigJ(C_ext);
      C_ext_data = hypre_CSRMatrixData(C_ext);
      C_ext_size = C_ext_i[hypre_CSRMatrixNumRows(C_ext)];

      hypre_CSRMatrixDestroy(C_int);
      hypre_CSRMatrixDestroy(C_int_diag);
      hypre_CSRMatrixDestroy(C_int_offd);
   }
   else
   {
      C_tmp_offd = hypre_CSRMatrixCreate(num_cols_diag_A, 0, 0);
      hypre_CSRMatrixInitialize(C_tmp_offd);
   }
   hypre_CSRMatrixDestroy(AT_diag);
   hypre_CSRMatrixDestroy(AT_offd);

   /*-----------------------------------------------------------------------
    *  Add contents of C_ext to C_tmp_diag and C_tmp_offd
    *  to obtain C_diag and C_offd
    *-----------------------------------------------------------------------*/

   /* check for new nonzero columns in C_offd generated through C_ext */

   first_col_diag_C = first_col_diag_B;
   last_col_diag_C = first_col_diag_B + (HYPRE_BigInt)num_cols_diag_B - 1;

   C_tmp_diag_i = hypre_CSRMatrixI(C_tmp_diag);
   if (C_ext_size || num_cols_offd_B)
   {
      HYPRE_Int C_ext_num_rows;
      num_sends_A = hypre_ParCSRCommPkgNumSends(comm_pkg_A);
      send_map_starts_A = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A);
      send_map_elmts_A = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_A);
      C_ext_num_rows = send_map_starts_A[num_sends_A];

      /* split the received rows into in-range (diag) and out-of-range (offd)
       * column entries; out-of-range global columns are collected in temp */
      C_ext_diag_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows+1, HYPRE_MEMORY_HOST);
      C_ext_offd_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows+1, HYPRE_MEMORY_HOST);
      temp = hypre_CTAlloc(HYPRE_BigInt, C_ext_size+num_cols_offd_B, HYPRE_MEMORY_HOST);
      C_ext_diag_size = 0;
      C_ext_offd_size = 0;
      for (i=0; i < C_ext_num_rows; i++)
      {
         for (j=C_ext_i[i]; j < C_ext_i[i+1]; j++)
            if (C_ext_j[j] < first_col_diag_C || C_ext_j[j] > last_col_diag_C)
               temp[C_ext_offd_size++] = C_ext_j[j];
            else
               C_ext_diag_size++;
         C_ext_diag_i[i+1] = C_ext_diag_size;
         C_ext_offd_i[i+1] = C_ext_offd_size;
      }
      cnt = C_ext_offd_size;
      for (i=0; i < num_cols_offd_B; i++)
         temp[cnt++] = col_map_offd_B[i];
      /* sort + unique to build C's offd column map */
      if (cnt)
      {
         hypre_BigQsort0(temp,0,cnt-1);
         value = temp[0];
         num_cols_offd_C = 1;
         for (i=1; i < cnt; i++)
         {
            if (temp[i] > value)
            {
               value = temp[i];
               temp[num_cols_offd_C++] = value;
            }
         }
      }

      if (num_cols_offd_C)
         col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST);
      for (i=0; i < num_cols_offd_C; i++)
         col_map_offd_C[i] = temp[i];

      hypre_TFree(temp, HYPRE_MEMORY_HOST);

      if (C_ext_diag_size)
      {
         C_ext_diag_j = hypre_CTAlloc(HYPRE_Int, C_ext_diag_size, HYPRE_MEMORY_HOST);
         C_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, C_ext_diag_size, HYPRE_MEMORY_HOST);
      }
      if (C_ext_offd_size)
      {
         C_ext_offd_j = hypre_CTAlloc(HYPRE_Int, C_ext_offd_size, HYPRE_MEMORY_HOST);
         C_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, C_ext_offd_size, HYPRE_MEMORY_HOST);
      }

      C_tmp_diag_j = hypre_CSRMatrixJ(C_tmp_diag);
      C_tmp_diag_data = hypre_CSRMatrixData(C_tmp_diag);

      C_tmp_offd_i = hypre_CSRMatrixI(C_tmp_offd);
      C_tmp_offd_j = hypre_CSRMatrixJ(C_tmp_offd);
      C_tmp_offd_data = hypre_CSRMatrixData(C_tmp_offd);

      /* second pass over C_ext: translate global columns to local diag
       * offsets / offd map positions and copy the values */
      cnt_offd = 0;
      cnt_diag = 0;
      for (i=0; i < C_ext_num_rows; i++)
      {
         for (j=C_ext_i[i]; j < C_ext_i[i+1]; j++)
            if (C_ext_j[j] < first_col_diag_C || C_ext_j[j] > last_col_diag_C)
            {
               C_ext_offd_j[cnt_offd] = hypre_BigBinarySearch(col_map_offd_C, C_ext_j[j], num_cols_offd_C);
               C_ext_offd_data[cnt_offd++] = C_ext_data[j];
            }
            else
            {
               C_ext_diag_j[cnt_diag] = (HYPRE_Int)(C_ext_j[j] - first_col_diag_C);
               C_ext_diag_data[cnt_diag++] = C_ext_data[j];
            }
      }
   }

   if (C_ext)
   {
      hypre_CSRMatrixDestroy(C_ext);
      C_ext = NULL;
   }

   if (num_cols_offd_B)
   {
      /* renumber C_tmp_offd's columns from B's offd map into C's offd map */
      map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST);

      cnt = 0;
      for (i=0; i < num_cols_offd_C; i++)
         if (col_map_offd_C[i] == col_map_offd_B[cnt])
         {
            map_B_to_C[cnt++] = i;
            if (cnt == num_cols_offd_B) break;
         }
      for (i=0; i < hypre_CSRMatrixI(C_tmp_offd)[hypre_CSRMatrixNumRows(C_tmp_offd)]; i++)
      {
         j_indx = C_tmp_offd_j[i];
         C_tmp_offd_j[i] = map_B_to_C[j_indx];
      }
   }

   /*-----------------------------------------------------------------------
    * Need to compute C_diag = C_tmp_diag + C_ext_diag
    * and  C_offd = C_tmp_offd + C_ext_offd   !!!!
    * First generate structure
    *-----------------------------------------------------------------------*/

   if (C_ext_size || num_cols_offd_B)
   {
      C_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A+1, memory_location_C);
      C_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A+1, memory_location_C);

      /* per-thread nonzero counts, prefix-summed by thread 0 between passes */
      C_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
      C_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
      {
         HYPRE_Int *B_marker = NULL;
         HYPRE_Int *B_marker_offd = NULL;
         HYPRE_Int ik, jk, j1, j2, jcol;
         HYPRE_Int ns, ne, ii, nnz_d, nnz_o;
         HYPRE_Int rest, size;
         HYPRE_Int num_threads = hypre_NumActiveThreads();

         size = num_cols_diag_A/num_threads;
         rest = num_cols_diag_A - size*num_threads;
         ii = hypre_GetThreadNum();
         if (ii < rest)
         {
            ns = ii*size+ii;
            ne = (ii+1)*size+ii+1;
         }
         else
         {
            ns = ii*size+rest;
            ne = (ii+1)*size+rest;
         }

         B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B, HYPRE_MEMORY_HOST);
         B_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST);

         for (ik = 0; ik < num_cols_diag_B; ik++)
            B_marker[ik] = -1;

         for (ik = 0; ik < num_cols_offd_C; ik++)
            B_marker_offd[ik] = -1;

         /* PASS 1 (symbolic): count distinct columns per output row ik,
          * merging C_tmp_* with the C_ext rows whose send_map element is ik */
         nnz_d = 0;
         nnz_o = 0;
         for (ik = ns; ik < ne; ik++)
         {
            for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik+1]; jk++)
            {
               jcol = C_tmp_diag_j[jk];
               B_marker[jcol] = ik;
               nnz_d++;
            }

            for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik+1]; jk++)
            {
               jcol = C_tmp_offd_j[jk];
               B_marker_offd[jcol] = ik;
               nnz_o++;
            }

            for (jk = 0; jk < num_sends_A; jk++)
               for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk+1]; j1++)
                  if (send_map_elmts_A[j1] == ik)
                  {
                     for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1+1]; j2++)
                     {
                        jcol = C_ext_diag_j[j2];
                        if (B_marker[jcol] < ik)
                        {
                           B_marker[jcol] = ik;
                           nnz_d++;
                        }
                     }
                     for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1+1]; j2++)
                     {
                        jcol = C_ext_offd_j[j2];
                        if (B_marker_offd[jcol] < ik)
                        {
                           B_marker_offd[jcol] = ik;
                           nnz_o++;
                        }
                     }
                     break;
                  }
            C_diag_array[ii] = nnz_d;
            C_offd_array[ii] = nnz_o;
         }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

         /* thread 0 prefix-sums the counts and allocates C_diag/C_offd */
         if (ii == 0)
         {
            nnz_d = 0;
            nnz_o = 0;
            for (ik = 0; ik < num_threads-1; ik++)
            {
               C_diag_array[ik+1] += C_diag_array[ik];
               C_offd_array[ik+1] += C_offd_array[ik];
            }
            nnz_d = C_diag_array[num_threads-1];
            nnz_o = C_offd_array[num_threads-1];
            C_diag_i[num_cols_diag_A] = nnz_d;
            C_offd_i[num_cols_diag_A] = nnz_o;

            C_diag = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_diag_A, nnz_d);
            C_offd = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_offd_C, nnz_o);
            hypre_CSRMatrixI(C_diag) = C_diag_i;
            hypre_CSRMatrixInitialize_v2(C_diag, 0, memory_location_C);
            C_diag_j = hypre_CSRMatrixJ(C_diag);
            C_diag_data = hypre_CSRMatrixData(C_diag);

            hypre_CSRMatrixI(C_offd) = C_offd_i;
            hypre_CSRMatrixInitialize_v2(C_offd, 0, memory_location_C);
            C_offd_j = hypre_CSRMatrixJ(C_offd);
            C_offd_data = hypre_CSRMatrixData(C_offd);
         }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

         /*-----------------------------------------------------------------------
          * Need to compute C_diag = C_tmp_diag + C_ext_diag
          * and  C_offd = C_tmp_offd + C_ext_offd   !!!!
          * Now fill in values
          *-----------------------------------------------------------------------*/

         for (ik = 0; ik < num_cols_diag_B; ik++)
            B_marker[ik] = -1;

         for (ik = 0; ik < num_cols_offd_C; ik++)
            B_marker_offd[ik] = -1;

         /*-----------------------------------------------------------------------
          * Populate matrices
          *-----------------------------------------------------------------------*/

         /* PASS 2 (numeric): each thread starts at its prefix-sum offset;
          * B_marker now records the write position so duplicates are summed */
         nnz_d = 0;
         nnz_o = 0;
         nnz_o = 0;  /* (redundant re-init kept verbatim from original) */
         if (ii)
         {
            nnz_d = C_diag_array[ii-1];
            nnz_o = C_offd_array[ii-1];
         }
         for (ik = ns; ik < ne; ik++)
         {
            C_diag_i[ik] = nnz_d;
            C_offd_i[ik] = nnz_o;
            for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik+1]; jk++)
            {
               jcol = C_tmp_diag_j[jk];
               C_diag_j[nnz_d] = jcol;
               C_diag_data[nnz_d] = C_tmp_diag_data[jk];
               B_marker[jcol] = nnz_d;
               nnz_d++;
            }

            for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik+1]; jk++)
            {
               jcol = C_tmp_offd_j[jk];
               C_offd_j[nnz_o] = jcol;
               C_offd_data[nnz_o] = C_tmp_offd_data[jk];
               B_marker_offd[jcol] = nnz_o;
               nnz_o++;
            }

            for (jk = 0; jk < num_sends_A; jk++)
               for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk+1]; j1++)
                  if (send_map_elmts_A[j1] == ik)
                  {
                     for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1+1]; j2++)
                     {
                        jcol = C_ext_diag_j[j2];
                        if (B_marker[jcol] < C_diag_i[ik])
                        {
                           C_diag_j[nnz_d] = jcol;
                           C_diag_data[nnz_d] = C_ext_diag_data[j2];
                           B_marker[jcol] = nnz_d;
                           nnz_d++;
                        }
                        else
                           C_diag_data[B_marker[jcol]] += C_ext_diag_data[j2];
                     }
                     for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1+1]; j2++)
                     {
                        jcol = C_ext_offd_j[j2];
                        if (B_marker_offd[jcol] < C_offd_i[ik])
                        {
                           C_offd_j[nnz_o] = jcol;
                           C_offd_data[nnz_o] = C_ext_offd_data[j2];
                           B_marker_offd[jcol] = nnz_o;
                           nnz_o++;
                        }
                        else
                           C_offd_data[B_marker_offd[jcol]] += C_ext_offd_data[j2];
                     }
                     break;
                  }
         }
         hypre_TFree(B_marker, HYPRE_MEMORY_HOST);
         hypre_TFree(B_marker_offd, HYPRE_MEMORY_HOST);
      } /*end parallel region */

      hypre_TFree(C_diag_array, HYPRE_MEMORY_HOST);
      hypre_TFree(C_offd_array, HYPRE_MEMORY_HOST);
   }

   /*C = hypre_ParCSRMatrixCreate(comm, n_cols_A, n_cols_B, col_starts_A,
        col_starts_B, num_cols_offd_C, nnz_diag, nnz_offd);

   hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(C));
   hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(C)); */

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* row_starts[0] is start of local rows.  row_starts[1] is start of next
      processor's rows */
   first_row_index = col_starts_A[0];
   local_num_rows = (HYPRE_Int)(col_starts_A[1]-first_row_index );
   first_col_diag = col_starts_B[0];
   local_num_cols = (HYPRE_Int)(col_starts_B[1]-first_col_diag);
#else
   first_row_index = col_starts_A[my_id];
   local_num_rows = (HYPRE_Int)(col_starts_A[my_id+1]-first_row_index);
   first_col_diag = col_starts_B[my_id];
   local_num_cols = (HYPRE_Int)(col_starts_B[my_id+1]-first_col_diag);
#endif

   /* assemble C by hand; its row/col partitions alias A's and B's col_starts
    * (ownership flags cleared below) */
   C = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixComm(C) = comm;
   hypre_ParCSRMatrixGlobalNumRows(C) = n_cols_A;
   hypre_ParCSRMatrixGlobalNumCols(C) = n_cols_B;
   hypre_ParCSRMatrixFirstRowIndex(C) = first_row_index;
   hypre_ParCSRMatrixFirstColDiag(C) = first_col_diag;
   hypre_ParCSRMatrixLastRowIndex(C) = first_row_index + (HYPRE_BigInt)local_num_rows - 1;
   hypre_ParCSRMatrixLastColDiag(C) = first_col_diag + (HYPRE_BigInt)local_num_cols - 1;
   hypre_ParCSRMatrixColMapOffd(C) = NULL;
   hypre_ParCSRMatrixAssumedPartition(C) = NULL;
   hypre_ParCSRMatrixRowStarts(C) = col_starts_A;
   hypre_ParCSRMatrixColStarts(C) = col_starts_B;
   hypre_ParCSRMatrixCommPkg(C) = NULL;
   hypre_ParCSRMatrixCommPkgT(C) = NULL;

   /* set defaults */
   hypre_ParCSRMatrixOwnsData(C) = 1;
   hypre_ParCSRMatrixRowindices(C) = NULL;
   hypre_ParCSRMatrixRowvalues(C) = NULL;
   hypre_ParCSRMatrixGetrowactive(C) = 0;

   /* Note that C does not own the partitionings */
   hypre_ParCSRMatrixSetRowStartsOwner(C,0);
   hypre_ParCSRMatrixSetColStartsOwner(C,0);

   /* if no external contributions were merged, the local products ARE C */
   if (C_diag)
   {
      hypre_ParCSRMatrixDiag(C) = C_diag;
   }
   else
   {
      hypre_ParCSRMatrixDiag(C) = C_tmp_diag;
   }

   if (C_offd)
   {
      hypre_ParCSRMatrixOffd(C) = C_offd;
   }
   else
   {
      hypre_ParCSRMatrixOffd(C) = C_tmp_offd;
   }

   hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(C)) = memory_location_C;
   hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(C)) = memory_location_C;

   if (num_cols_offd_C)
   {
      HYPRE_Int jj_count_offd, nnz_offd;
      HYPRE_BigInt *new_col_map_offd_C = NULL;

      /* drop offd columns that ended up unused; P_marker: -1 = unseen,
       * then 0 = used, finally the new compressed index */
      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST);
      for (i=0; i < num_cols_offd_C; i++)
      {
         P_marker[i] = -1;
      }

      jj_count_offd = 0;
      nnz_offd = C_offd_i[num_cols_diag_A];
      for (i=0; i < nnz_offd; i++)
      {
         i1 = C_offd_j[i];
         if (P_marker[i1])
         {
            P_marker[i1] = 0;
            jj_count_offd++;
         }
      }

      if (jj_count_offd < num_cols_offd_C)
      {
         new_col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, jj_count_offd, HYPRE_MEMORY_HOST);
         jj_count_offd = 0;
         for (i=0; i < num_cols_offd_C; i++)
         {
            if (!P_marker[i])
            {
               P_marker[i] = jj_count_offd;
               new_col_map_offd_C[jj_count_offd++] = col_map_offd_C[i];
            }
         }

         for (i=0; i < nnz_offd; i++)
         {
            i1 = C_offd_j[i];
            C_offd_j[i] = P_marker[i1];
         }

         num_cols_offd_C = jj_count_offd;
         hypre_TFree(col_map_offd_C, HYPRE_MEMORY_HOST);
         col_map_offd_C = new_col_map_offd_C;
         hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(C)) = num_cols_offd_C;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }
   hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C;

   /*-----------------------------------------------------------------------
    *  Free various arrays
    *-----------------------------------------------------------------------*/
   if (C_ext_size || num_cols_offd_B)
   {
      hypre_TFree(C_ext_diag_i, HYPRE_MEMORY_HOST);
      hypre_TFree(C_ext_offd_i, HYPRE_MEMORY_HOST);
   }
   if (C_ext_diag_size)
   {
      hypre_TFree(C_ext_diag_j, HYPRE_MEMORY_HOST);
      hypre_TFree(C_ext_diag_data, HYPRE_MEMORY_HOST);
   }
   if (C_ext_offd_size)
   {
      hypre_TFree(C_ext_offd_j, HYPRE_MEMORY_HOST);
      hypre_TFree(C_ext_offd_data, HYPRE_MEMORY_HOST);
   }
   if (num_cols_offd_B)
   {
      hypre_TFree(map_B_to_C, HYPRE_MEMORY_HOST);
   }

   if (C_diag)
   {
      hypre_CSRMatrixDestroy(C_tmp_diag);
   }
   if (C_offd)
   {
      hypre_CSRMatrixDestroy(C_tmp_offd);
   }

#if defined(HYPRE_USING_CUDA)
   if ( hypre_GetExecPolicy2(memory_location_A, memory_location_B) == HYPRE_EXEC_DEVICE )
   {
      hypre_CSRMatrixMoveDiagFirstDevice(hypre_ParCSRMatrixDiag(C));
      hypre_SyncCudaComputeStream(hypre_handle());
   }
#endif

   return C;
}

/* Scale a ParVector by the inverse of A's block diagonal:
 * (*bs) = inv(blockdiag(A)) * b, with dense block inverses already stored in
 * A->bdiaginv (blockSize x blockSize, column-blocked) and the halo exchange
 * described by A->bdiaginv_comm_pkg.  Blocks may straddle processor
 * boundaries; off-processor entries arrive via recv_b. */
HYPRE_Int
hypre_ParvecBdiagInvScal( hypre_ParVector     *b,
                          HYPRE_Int            blockSize,
                          hypre_ParVector    **bs,
                          hypre_ParCSRMatrix  *A)
{
   /* NOTE(review): hypre_ParCSRMatrixComm applied to a ParVector -- works
    * because the macro just reads ->comm, but hypre_ParVectorComm(b) would
    * be the type-appropriate accessor */
   MPI_Comm         comm     = hypre_ParCSRMatrixComm(b);
   HYPRE_Int        num_procs, my_id;
   hypre_MPI_Comm_rank(comm, &my_id);
   hypre_MPI_Comm_size(comm, &num_procs);

   HYPRE_Int i, j, s, block_start, block_end;
   HYPRE_BigInt nrow_global = hypre_ParVectorGlobalSize(b);
   HYPRE_BigInt first_row   = hypre_ParVectorFirstIndex(b);
   HYPRE_BigInt last_row    = hypre_ParVectorLastIndex(b);
   HYPRE_BigInt end_row     = last_row + 1; /* one past-the-last */
   /* first/last block boundaries (global, aligned to blockSize) touching
    * this processor's row range */
   HYPRE_BigInt first_row_block = first_row / (HYPRE_BigInt)(blockSize) * (HYPRE_BigInt)blockSize;
   HYPRE_BigInt end_row_block   = hypre_min( (last_row / (HYPRE_BigInt)blockSize + 1) * (HYPRE_BigInt)blockSize, nrow_global );

   hypre_assert(blockSize == A->bdiag_size);
   HYPRE_Complex *bdiaginv = A->bdiaginv;
   hypre_ParCSRCommPkg *comm_pkg = A->bdiaginv_comm_pkg;

   HYPRE_Complex *dense = bdiaginv;

   //for (i=first_row_block; i < end_row; i+=blockSize) ;
   //printf("===[%d %d), [ %d %d ) %d === \n", first_row, end_row, first_row_block, end_row_block, i);

   /* local vector of b */
   hypre_Vector    *b_local      = hypre_ParVectorLocalVector(b);
   HYPRE_Complex   *b_local_data = hypre_VectorData(b_local);
   /* number of sends (#procs) */
   HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   /* number of rows to send */
   HYPRE_Int num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
   /* number of recvs (#procs) */
   HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   /* number of rows to recv */
   HYPRE_Int num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs);
   hypre_ParCSRCommHandle  *comm_handle;

#ifdef HYPRE_NO_GLOBAL_PARTITION
   j = 2;
#else
   j = num_procs + 1;
#endif
   HYPRE_BigInt *part = hypre_TAlloc(HYPRE_BigInt, j, HYPRE_MEMORY_HOST);
   memcpy(part, hypre_ParVectorPartitioning(b), j*sizeof(HYPRE_BigInt));
   hypre_ParVector *bnew = hypre_ParVectorCreate( hypre_ParVectorComm(b), hypre_ParVectorGlobalSize(b), part );
   hypre_ParVectorInitialize(bnew);
   hypre_Vector    *bnew_local      = hypre_ParVectorLocalVector(bnew);
   HYPRE_Complex   *bnew_local_data = hypre_VectorData(bnew_local);

   /* send and recv b */
   HYPRE_Complex *send_b = hypre_TAlloc(HYPRE_Complex, num_rows_send, HYPRE_MEMORY_HOST);
   HYPRE_Complex *recv_b = hypre_TAlloc(HYPRE_Complex, num_rows_recv, HYPRE_MEMORY_HOST);

   for (i = 0; i < num_rows_send; i++)
   {
      j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i);
      send_b[i] = b_local_data[j];
   }
   comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, send_b, recv_b);
   /* ... */
   hypre_ParCSRCommHandleDestroy(comm_handle);

   for (block_start = first_row_block; block_start < end_row_block; block_start += blockSize)
   {
      HYPRE_BigInt big_i;
      block_end = hypre_min(block_start + (HYPRE_BigInt)blockSize, nrow_global);
      s = (HYPRE_Int)(block_end - block_start);
      for (big_i = block_start; big_i < block_end; big_i++)
      {
         /* only compute rows this processor owns */
         if (big_i < first_row || big_i >= end_row)
         {
            continue;
         }
         HYPRE_Int local_i = (HYPRE_Int)(big_i - first_row);
         HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start);

         bnew_local_data[local_i] = 0.0;
         for (j = 0; j < s; j++)
         {
            HYPRE_BigInt global_rid = block_start + (HYPRE_BigInt)j;
            /* dense block stored column-major: entry (block_i, j) */
            HYPRE_Complex val = dense[block_i + j*blockSize];
            if (val == 0.0)
            {
               continue;
            }
            if (global_rid >= first_row && global_rid < end_row)
            {
               /* owned entry: read straight from the local vector */
               HYPRE_Int rid = (HYPRE_Int)(global_rid - first_row);
               bnew_local_data[local_i] += val * b_local_data[rid];
            }
            else
            {
               /* halo entry: recv_b holds rows below first_row first, then
                * rows at/above end_row -- index accordingly */
               HYPRE_Int rid;
               if (global_rid < first_row)
               {
                  rid = (HYPRE_Int)(global_rid - first_row_block);
               }
               else
               {
                  rid = (HYPRE_Int)(first_row - first_row_block + global_rid - end_row);
               }
               bnew_local_data[local_i] += val * recv_b[rid];
            }
         }
      }
      dense += blockSize * blockSize;
   }

   hypre_TFree(send_b, HYPRE_MEMORY_HOST);
   hypre_TFree(recv_b, HYPRE_MEMORY_HOST);

   *bs = bnew;

   return hypre_error_flag;
}

/**
 * @brief Compute As = B^{-1}*A, where B is the block diagonal of A
 * @param[in] A :
 *
@param[in] blockSize: block size
 * @param[out] B :
 * @return
 * @warning
 */
HYPRE_Int
hypre_ParcsrBdiagInvScal( hypre_ParCSRMatrix *A,
                          HYPRE_Int blockSize,
                          hypre_ParCSRMatrix **As)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_Int num_procs, my_id;
   hypre_MPI_Comm_rank(comm, &my_id);
   hypre_MPI_Comm_size(comm, &num_procs);

   HYPRE_Int i, j, k, s;
   HYPRE_BigInt block_start, block_end;

   /* diag part of A */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   /* off-diag part of A */
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);

   /* local row/col ranges of A on this proc */
   HYPRE_Int nrow_local = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_BigInt last_row = hypre_ParCSRMatrixLastRowIndex(A);
   HYPRE_BigInt end_row = first_row + (HYPRE_BigInt)nrow_local; /* one past-the-last */
   HYPRE_Int ncol_local = hypre_CSRMatrixNumCols(A_diag);
   HYPRE_BigInt first_col = hypre_ParCSRMatrixFirstColDiag(A);
   /* HYPRE_Int last_col = hypre_ParCSRMatrixLastColDiag(A); */
   HYPRE_BigInt end_col = first_col + (HYPRE_BigInt)ncol_local;

   HYPRE_BigInt nrow_global = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt ncol_global = hypre_ParCSRMatrixGlobalNumCols(A);
   HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
   void *request;

   /* if square globally and locally */
   HYPRE_Int square2 = (nrow_global == ncol_global) && (nrow_local == ncol_local) &&
                       (first_row == first_col);

   if (nrow_global != ncol_global)
   {
      hypre_printf("hypre_ParcsrBdiagInvScal: only support N_ROW == N_COL\n");
      return hypre_error_flag;
   }

   /* in block diagonals, row range of the blocks this proc span */
   HYPRE_BigInt first_row_block = first_row / (HYPRE_BigInt)blockSize * (HYPRE_BigInt)blockSize;
   HYPRE_BigInt end_row_block = hypre_min( (last_row / (HYPRE_BigInt)blockSize + 1) * (HYPRE_BigInt)blockSize, nrow_global );
   HYPRE_Int num_blocks = (HYPRE_Int)(last_row / (HYPRE_BigInt)blockSize + 1 - first_row / (HYPRE_BigInt)blockSize);

   //for (i=first_row_block; i < end_row; i+=blockSize) ;
   //printf("===[%d %d), [ %d %d ) %d === \n", first_row, end_row, first_row_block, end_row_block, i);
   //return 0;

   /* number of external rows: rows of the blocks this proc spans that
    * live on other procs */
   HYPRE_Int num_ext_rows = (HYPRE_Int)(end_row_block - first_row_block - (end_row - first_row));
   HYPRE_BigInt *ext_indices;
   HYPRE_Int A_ext_nnz;

   hypre_CSRMatrix *A_ext = NULL;
   HYPRE_Complex *A_ext_a = NULL;
   HYPRE_Int *A_ext_i = NULL;
   HYPRE_BigInt *A_ext_j = NULL;

   /* storage for all dense diagonal blocks this proc touches */
   HYPRE_Real *dense_all = hypre_CTAlloc(HYPRE_Complex, num_blocks*blockSize*blockSize, HYPRE_MEMORY_HOST);
   HYPRE_Real *dense = dense_all;
   HYPRE_Int *IPIV = hypre_TAlloc(HYPRE_Int, blockSize, HYPRE_MEMORY_HOST);
   HYPRE_Complex *dgetri_work = NULL;
   HYPRE_Int dgetri_lwork = -1, lapack_info;

   HYPRE_Int num_cols_A_offd_new;
   HYPRE_BigInt *col_map_offd_A_new;
   HYPRE_BigInt big_i;
   HYPRE_Int *offd2new = NULL;
   HYPRE_Int *marker_diag, *marker_newoffd;

   HYPRE_Int nnz_diag = A_diag_i[nrow_local];
   HYPRE_Int nnz_offd = A_offd_i[nrow_local];
   HYPRE_Int nnz_diag_new = 0, nnz_offd_new = 0;
   HYPRE_Int *A_diag_i_new, *A_diag_j_new, *A_offd_i_new, *A_offd_j_new;
   HYPRE_Complex *A_diag_a_new, *A_offd_a_new;
   /* heuristic initial capacity; grown on demand below */
   HYPRE_Int nnz_diag_alloc = 2 * nnz_diag;
   HYPRE_Int nnz_offd_alloc = 2 * nnz_offd;

   A_diag_i_new = hypre_CTAlloc(HYPRE_Int, nrow_local + 1, HYPRE_MEMORY_HOST);
   A_diag_j_new = hypre_CTAlloc(HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST);
   A_diag_a_new = hypre_CTAlloc(HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST);
   A_offd_i_new = hypre_CTAlloc(HYPRE_Int, nrow_local + 1, HYPRE_MEMORY_HOST);
   A_offd_j_new = hypre_CTAlloc(HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST);
   A_offd_a_new = hypre_CTAlloc(HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST);

   hypre_ParCSRMatrix *Anew;
   hypre_CSRMatrix *Anew_diag;
   hypre_CSRMatrix *Anew_offd;
   HYPRE_BigInt *row_starts_new, *col_starts_new;

   /* relative dropping threshold (approximately machine epsilon) */
   HYPRE_Real eps = 2.2e-16;

   /* Start with extracting the external rows */
   HYPRE_BigInt *ext_offd;
   ext_indices = hypre_CTAlloc(HYPRE_BigInt, num_ext_rows, HYPRE_MEMORY_HOST);
   j = 0;
   for (big_i = first_row_block; big_i < first_row; big_i++)
   {
      ext_indices[j++] = big_i;
   }
   for (big_i = end_row; big_i < end_row_block; big_i++)
   {
      ext_indices[j++] = big_i;
   }
   hypre_assert(j == num_ext_rows);

   /* create CommPkg for external rows */
   hypre_ParCSRFindExtendCommPkg(comm, nrow_global, first_row, nrow_local, row_starts,
                                 hypre_ParCSRMatrixAssumedPartition(A),
                                 num_ext_rows, ext_indices, &A->bdiaginv_comm_pkg);

   hypre_ParcsrGetExternalRowsInit(A, num_ext_rows, ext_indices, A->bdiaginv_comm_pkg, 1, &request);
   A_ext = hypre_ParcsrGetExternalRowsWait(request);

   hypre_TFree(ext_indices, HYPRE_MEMORY_HOST);

   A_ext_i = hypre_CSRMatrixI(A_ext);
   A_ext_j = hypre_CSRMatrixBigJ(A_ext);
   A_ext_a = hypre_CSRMatrixData(A_ext);
   A_ext_nnz = A_ext_i[num_ext_rows];
   ext_offd = hypre_CTAlloc(HYPRE_BigInt, A_ext_nnz, HYPRE_MEMORY_HOST);

   /* find the offd indices in A_ext */
   for (i = 0, j = 0; i < A_ext_nnz; i++)
   {
      /* global index */
      HYPRE_BigInt cid = A_ext_j[i];
      /* keep the offd indices */
      if (cid < first_col || cid >= end_col)
      {
         ext_offd[j++] = cid;
      }
   }
   /* remove duplicates after sorting (TODO better ways?) */
   hypre_BigQsort0(ext_offd, 0, j-1);
   for (i = 0, k = 0; i < j; i++)
   {
      if (i == 0 || ext_offd[i] != ext_offd[i-1])
      {
         ext_offd[k++] = ext_offd[i];
      }
   }
   /* union these `k' new indices into col_map_offd_A */
   col_map_offd_A_new = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd + k, HYPRE_MEMORY_HOST);
   if (k)
   {
      /* map offd to offd_new */
      offd2new = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   }
   hypre_union2(num_cols_A_offd, col_map_offd_A, k, ext_offd,
                &num_cols_A_offd_new, col_map_offd_A_new, offd2new, NULL);
   hypre_TFree(ext_offd, HYPRE_MEMORY_HOST);

   /*
    *   adjust column indices in A_ext
    */
   for (i = 0; i < A_ext_nnz; i++)
   {
      HYPRE_BigInt cid = A_ext_j[i];
      if (cid < first_col || cid >= end_col)
      {
         j = hypre_BigBinarySearch(col_map_offd_A_new, cid, num_cols_A_offd_new);
         /* searching must succeed */
         hypre_assert(j >= 0 && j < num_cols_A_offd_new);
         /* trick: save ncol_local + j back */
         A_ext_j[i] = ncol_local + j;
      }
      else
      {
         /* save local index: [0, ncol_local-1] */
         A_ext_j[i] = cid - first_col;
      }
   }

   /* marker for diag */
   marker_diag = hypre_TAlloc(HYPRE_Int, ncol_local, HYPRE_MEMORY_HOST);
   for (i = 0; i < ncol_local; i++)
   {
      marker_diag[i] = -1;
   }
   /* marker for newoffd */
   marker_newoffd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd_new, HYPRE_MEMORY_HOST);
   for (i = 0; i < num_cols_A_offd_new; i++)
   {
      marker_newoffd[i] = -1;
   }

   /* outer most loop for blocks */
   for (block_start = first_row_block; block_start < end_row_block; block_start += (HYPRE_BigInt)blockSize)
   {
      HYPRE_BigInt big_i; /* NOTE: shadows the outer big_i */
      block_end = hypre_min(block_start + (HYPRE_BigInt)blockSize, nrow_global);
      s = (HYPRE_Int)(block_end - block_start);

      /* 1. fill the dense block diag matrix */
      for (big_i = block_start; big_i < block_end; big_i++)
      {
         /* row index in this block */
         HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start);

         /* row index i: it can be local or external */
         if (big_i >= first_row && big_i < end_row)
         {
            /* is a local row */
            j = (HYPRE_Int)(big_i - first_row);
            for (k = A_diag_i[j]; k < A_diag_i[j+1]; k++)
            {
               HYPRE_BigInt cid = (HYPRE_BigInt)A_diag_j[k] + first_col;
               if (cid >= block_start && cid < block_end)
               {
                  dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_diag_a[k];
               }
            }
            if (num_cols_A_offd)
            {
               for (k = A_offd_i[j]; k < A_offd_i[j+1]; k++)
               {
                  HYPRE_BigInt cid = col_map_offd_A[A_offd_j[k]];
                  if (cid >= block_start && cid < block_end)
                  {
                     dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_offd_a[k];
                  }
               }
            }
         }
         else
         {
            /* is an external row: map to its position in A_ext
             * (rows below first_row come first, then rows >= end_row) */
            if (big_i < first_row)
            {
               j = (HYPRE_Int)(big_i - first_row_block);
            }
            else
            {
               j = (HYPRE_Int)(first_row - first_row_block + big_i - end_row);
            }
            for (k = A_ext_i[j]; k < A_ext_i[j+1]; k++)
            {
               HYPRE_BigInt cid = A_ext_j[k];
               /* recover the global index */
               cid = cid < (HYPRE_BigInt)ncol_local ? cid + first_col : col_map_offd_A_new[cid-ncol_local];
               if (cid >= block_start && cid < block_end)
               {
                  dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_ext_a[k];
               }
            }
         }
      }

      /* 2. invert the dense matrix (LU factor, then dgetri) */
      hypre_dgetrf(&s, &s, dense, &blockSize, IPIV, &lapack_info);

      hypre_assert(lapack_info == 0);

      if (lapack_info == 0)
      {
         HYPRE_Int query = -1;
         HYPRE_Real lwork_opt;
         /* query the optimal size of work */
         hypre_dgetri(&s, dense, &blockSize, IPIV, &lwork_opt, &query, &lapack_info);

         hypre_assert(lapack_info == 0);

         if (lwork_opt > dgetri_lwork)
         {
            dgetri_lwork = lwork_opt;
            dgetri_work = hypre_TReAlloc(dgetri_work, HYPRE_Complex, dgetri_lwork, HYPRE_MEMORY_HOST);
         }

         hypre_dgetri(&s, dense, &blockSize, IPIV, dgetri_work, &dgetri_lwork, &lapack_info);

         hypre_assert(lapack_info == 0);
      }

      /* filter out *zeros*: drop entries tiny relative to the block's F-norm */
      HYPRE_Real Fnorm = 0.0;
      for (i = 0; i < s; i++)
      {
         for (j = 0; j < s; j++)
         {
            HYPRE_Complex t = dense[j+i*blockSize];
            Fnorm += t * t;
         }
      }

      Fnorm = sqrt(Fnorm);

      for (i = 0; i < s; i++)
      {
         for (j = 0; j < s; j++)
         {
            if ( hypre_abs(dense[j+i*blockSize]) < eps * Fnorm )
            {
               dense[j+i*blockSize] = 0.0;
            }
         }
      }

      /* 3. premultiplication: one-pass dynamic allocation */
      for (big_i = block_start; big_i < block_end; big_i++)
      {
         /* starting points of this row in j */
         HYPRE_Int diag_i_start = nnz_diag_new;
         HYPRE_Int offd_i_start = nnz_offd_new;

         /* compute a new row with global index 'i' and local index 'local_i' */
         HYPRE_Int local_i = (HYPRE_Int)(big_i - first_row);
         /* row index in this block */
         HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start);

         if (big_i < first_row || big_i >= end_row)
         {
            continue;
         }

         /* if square^2: reserve the first space in diag part to the diag entry */
         if (square2)
         {
            marker_diag[local_i] = nnz_diag_new;
            if (nnz_diag_new == nnz_diag_alloc)
            {
               nnz_diag_alloc = nnz_diag_alloc * 2 + 1;
               A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST);
               A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST);
            }
            A_diag_j_new[nnz_diag_new] = local_i;
            A_diag_a_new[nnz_diag_new] = 0.0;
            nnz_diag_new ++;
         }

         /* combine s rows */
         for (j = 0; j < s; j++)
         {
            /* row to combine: global row id */
            HYPRE_BigInt global_rid = block_start + (HYPRE_BigInt)j;
            /* the multiplier */
            HYPRE_Complex val = dense[block_i + j*blockSize];

            if (val == 0.0)
            {
               continue;
            }

            if (global_rid >= first_row && global_rid < end_row)
            {
               /* this row is local */
               HYPRE_Int rid = (HYPRE_Int)(global_rid - first_row);
               HYPRE_Int ii;

               for (ii = A_diag_i[rid]; ii < A_diag_i[rid+1]; ii++)
               {
                  HYPRE_Int col = A_diag_j[ii];
                  HYPRE_Complex vv = A_diag_a[ii];

                  if (marker_diag[col] < diag_i_start)
                  {
                     /* this col has not been seen before, create new entry */
                     marker_diag[col] = nnz_diag_new;
                     if (nnz_diag_new == nnz_diag_alloc)
                     {
                        nnz_diag_alloc = nnz_diag_alloc * 2 + 1;
                        A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST);
                        A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST);
                     }
                     A_diag_j_new[nnz_diag_new] = col;
                     A_diag_a_new[nnz_diag_new] = val * vv;
                     nnz_diag_new ++;
                  }
                  else
                  {
                     /* existing entry, update */
                     HYPRE_Int p = marker_diag[col];

                     hypre_assert(A_diag_j_new[p] == col);

                     A_diag_a_new[p] += val * vv;
                  }
               }

               for (ii = A_offd_i[rid]; ii < A_offd_i[rid+1]; ii++)
               {
                  HYPRE_Int col = A_offd_j[ii];
                  /* use the mapper to map to new offd */
                  HYPRE_Int col_new = offd2new ? offd2new[col] : col;
                  HYPRE_Complex vv = A_offd_a[ii];

                  if (marker_newoffd[col_new] < offd_i_start)
                  {
                     /* this col has not been seen before, create new entry */
                     marker_newoffd[col_new] = nnz_offd_new;
                     if (nnz_offd_new == nnz_offd_alloc)
                     {
                        nnz_offd_alloc = nnz_offd_alloc * 2 + 1;
                        A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST);
                        A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST);
                     }
                     A_offd_j_new[nnz_offd_new] = col_new;
                     A_offd_a_new[nnz_offd_new] = val * vv;
                     nnz_offd_new ++;
                  }
                  else
                  {
                     /* existing entry, update */
                     HYPRE_Int p = marker_newoffd[col_new];

                     hypre_assert(A_offd_j_new[p] == col_new);

                     A_offd_a_new[p] += val * vv;
                  }
               }
            }
            else
            {
               /* this is an external row: go to A_ext */
               HYPRE_Int rid, ii;

               if (global_rid < first_row)
               {
                  rid = (HYPRE_Int)(global_rid - first_row_block);
               }
               else
               {
                  rid = (HYPRE_Int)(first_row - first_row_block + global_rid - end_row);
               }

               for (ii = A_ext_i[rid]; ii < A_ext_i[rid+1]; ii++)
               {
                  HYPRE_Int col = (HYPRE_Int)A_ext_j[ii];
                  HYPRE_Complex vv = A_ext_a[ii];

                  if (col < ncol_local)
                  {
                     /* in diag part */
                     if (marker_diag[col] < diag_i_start)
                     {
                        /* this col has not been seen before, create new entry */
                        marker_diag[col] = nnz_diag_new;
                        if (nnz_diag_new == nnz_diag_alloc)
                        {
                           nnz_diag_alloc = nnz_diag_alloc * 2 + 1;
                           A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST);
                           A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST);
                        }
                        A_diag_j_new[nnz_diag_new] = col;
                        A_diag_a_new[nnz_diag_new] = val * vv;
                        nnz_diag_new ++;
                     }
                     else
                     {
                        /* existing entry, update */
                        HYPRE_Int p = marker_diag[col];

                        hypre_assert(A_diag_j_new[p] == col);

                        A_diag_a_new[p] += val * vv;
                     }
                  }
                  else
                  {
                     /* in offd part */
                     col -= ncol_local;
                     if (marker_newoffd[col] < offd_i_start)
                     {
                        /* this col has not been seen before, create new entry */
                        marker_newoffd[col] = nnz_offd_new;
                        if (nnz_offd_new == nnz_offd_alloc)
                        {
                           nnz_offd_alloc = nnz_offd_alloc * 2 + 1;
                           A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST);
                           A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST);
                        }
                        A_offd_j_new[nnz_offd_new] = col;
                        A_offd_a_new[nnz_offd_new] = val * vv;
                        nnz_offd_new ++;
                     }
                     else
                     {
                        /* existing entry, update */
                        HYPRE_Int p = marker_newoffd[col];

                        hypre_assert(A_offd_j_new[p] == col);

                        A_offd_a_new[p] += val * vv;
                     }
                  }
               }
            }
         } /* done for row local_i */

         A_diag_i_new[local_i + 1] = nnz_diag_new;
         A_offd_i_new[local_i + 1] = nnz_offd_new;
      } /* for i, each row */

      dense += blockSize * blockSize;
   } /* for each block */

   /* done with all rows */
   /* resize properly */
   A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_new, HYPRE_MEMORY_HOST);
   A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_new, HYPRE_MEMORY_HOST);
   A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_new, HYPRE_MEMORY_HOST);
   A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_new, HYPRE_MEMORY_HOST);

   /* readjust col_map_offd_new: compress out unused offd columns */
   for (i = 0; i < num_cols_A_offd_new; i++)
   {
      marker_newoffd[i] = -1;
   }
   for (i = 0; i < nnz_offd_new; i++)
   {
      j = A_offd_j_new[i];
      if (marker_newoffd[j] == -1)
      {
         marker_newoffd[j] = 1;
      }
   }
   for (i = 0, j = 0; i < num_cols_A_offd_new; i++)
   {
      if (marker_newoffd[i] == 1)
      {
         col_map_offd_A_new[j] = col_map_offd_A_new[i];
         marker_newoffd[i] = j++;
      }
   }
   num_cols_A_offd_new = j;

   for (i = 0; i < nnz_offd_new; i++)
   {
      j = marker_newoffd[A_offd_j_new[i]];

      hypre_assert(j >= 0 && j < num_cols_A_offd_new);

      A_offd_j_new[i] = j;
   }

#ifdef HYPRE_NO_GLOBAL_PARTITION
   j = 2;
#else
   j = num_procs + 1;
#endif
   row_starts_new = hypre_CTAlloc(HYPRE_BigInt, j, HYPRE_MEMORY_HOST);
   col_starts_new = hypre_CTAlloc(HYPRE_BigInt, j, HYPRE_MEMORY_HOST);
   memcpy(row_starts_new, hypre_ParCSRMatrixRowStarts(A), j*sizeof(HYPRE_BigInt));
   memcpy(col_starts_new, hypre_ParCSRMatrixColStarts(A), j*sizeof(HYPRE_BigInt));

   /* Now, we should have everything of Parcsr matrix As */
Anew = hypre_ParCSRMatrixCreate(comm,
                                   nrow_global,
                                   ncol_global,
                                   row_starts_new,
                                   col_starts_new,
                                   num_cols_A_offd_new,
                                   nnz_diag_new,
                                   nnz_offd_new);

   Anew_diag = hypre_ParCSRMatrixDiag(Anew);
   hypre_CSRMatrixData(Anew_diag) = A_diag_a_new;
   hypre_CSRMatrixI(Anew_diag) = A_diag_i_new;
   hypre_CSRMatrixJ(Anew_diag) = A_diag_j_new;

   Anew_offd = hypre_ParCSRMatrixOffd(Anew);
   hypre_CSRMatrixData(Anew_offd) = A_offd_a_new;
   hypre_CSRMatrixI(Anew_offd) = A_offd_i_new;
   hypre_CSRMatrixJ(Anew_offd) = A_offd_j_new;

   hypre_ParCSRMatrixColMapOffd(Anew) = col_map_offd_A_new;

   hypre_ParCSRMatrixSetNumNonzeros(Anew);
   hypre_ParCSRMatrixDNumNonzeros(Anew) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(Anew);
   //printf("nnz_diag %d --> %d, nnz_offd %d --> %d\n", nnz_diag, nnz_diag_new, nnz_offd, nnz_offd_new);

   /* create CommPkg of Anew */
   hypre_MatvecCommPkgCreate(Anew);

   *As = Anew;

   /*
   if (bdiaginv)
   {
      *bdiaginv = dense_all;
   }
   else
   {
      hypre_TFree(dense_all, HYPRE_MEMORY_HOST);
   }
   */
   /* save diagonal blocks in A (consumed later by hypre_ParvecBdiagInvScal) */
   A->bdiag_size = blockSize;
   A->bdiaginv = dense_all;

   /* free workspace */
   hypre_TFree(IPIV, HYPRE_MEMORY_HOST);
   hypre_TFree(dgetri_work, HYPRE_MEMORY_HOST);
   hypre_TFree(marker_diag, HYPRE_MEMORY_HOST);
   hypre_TFree(marker_newoffd, HYPRE_MEMORY_HOST);
   hypre_TFree(offd2new, HYPRE_MEMORY_HOST);
   hypre_CSRMatrixDestroy(A_ext);

   return hypre_error_flag;
}

/**
 * Start fetching the rows of A whose global indices are given in `indices`
 * from the owning processors, as described by comm_pkg. Row lengths are
 * exchanged first (asynchronously), then column indices and, if want_data,
 * values. The returned opaque request is finished by
 * hypre_ParcsrGetExternalRowsWait, which yields the received rows as a
 * CSRMatrix with global (big) column indices.
 */
HYPRE_Int
hypre_ParcsrGetExternalRowsInit( hypre_ParCSRMatrix   *A,
                                 HYPRE_Int             indices_len,
                                 HYPRE_BigInt         *indices,
                                 hypre_ParCSRCommPkg  *comm_pkg,
                                 HYPRE_Int             want_data,
                                 void                **request_ptr)
{
   HYPRE_Int i, j, k;
   HYPRE_Int num_sends, num_rows_send, num_nnz_send, *send_i,
             num_recvs, num_rows_recv, num_nnz_recv, *recv_i,
             *send_jstarts, *recv_jstarts, *send_i_offset;
   HYPRE_BigInt *send_j, *recv_j;
   HYPRE_Complex *send_a = NULL, *recv_a = NULL;
   hypre_ParCSRCommPkg *comm_pkg_j;
   hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a;

   /* HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); */
   /* diag part of A */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   /* HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A_diag); */
   /* off-diag part of A */
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   /* HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); */
   /* HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A); */
   HYPRE_BigInt first_col = hypre_ParCSRMatrixFirstColDiag(A);
   HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);

   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_Int num_procs;
   HYPRE_Int my_id;
   void **vrequest;

   hypre_CSRMatrix *A_ext;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* number of sends (#procs) */
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   /* number of rows to send */
   num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
   /* number of recvs (#procs) */
   num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   /* number of rows to recv */
   num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs);

   /* must be true if indices contains proper offd indices */
   hypre_assert(indices_len == num_rows_recv);

   /* send_i/recv_i:
    * the arrays to send and recv: we first send and recv the row lengths */
   send_i = hypre_TAlloc(HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST);
   recv_i = hypre_CTAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_HOST);
   /* fill the send array with row lengths */
   for (i = 0, num_nnz_send = 0; i < num_rows_send; i++)
   {
      /* j: row index to send */
      j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i);
      send_i[i] = A_diag_i[j+1] - A_diag_i[j] + A_offd_i[j+1] - A_offd_i[j];
      num_nnz_send += send_i[i];
   }

   /* send this array out: note the shift in recv_i by one (async) */
   comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_i, recv_i+1);

   /* prepare data to send out. overlap with the above communication */
   send_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_send, HYPRE_MEMORY_HOST);
   if (want_data)
   {
      send_a = hypre_TAlloc(HYPRE_Complex, num_nnz_send, HYPRE_MEMORY_HOST);
   }

   send_i_offset = hypre_TAlloc(HYPRE_Int, num_rows_send + 1, HYPRE_MEMORY_HOST);
   send_i_offset[0] = 0;
   hypre_TMemcpy(send_i_offset + 1, send_i, HYPRE_Int, num_rows_send,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
   /* prefix sum. TODO: OMP parallelization */
   for (i = 1; i <= num_rows_send; i++)
   {
      send_i_offset[i] += send_i_offset[i-1];
   }

   hypre_assert(send_i_offset[num_rows_send] == num_nnz_send);

   /* pointers to each proc in send_j */
   send_jstarts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i <= num_sends; i++)
   {
      send_jstarts[i] = send_i_offset[hypre_ParCSRCommPkgSendMapStart(comm_pkg, i)];
   }

   hypre_assert(send_jstarts[num_sends] == num_nnz_send);

   /* fill the CSR matrix: j and a */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE private(i,j,k)
#endif
   for (i = 0; i < num_rows_send; i++)
   {
      HYPRE_Int i1 = send_i_offset[i];
      j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i);
      /* open row j and fill ja and a to send */
      for (k = A_diag_i[j]; k < A_diag_i[j+1]; k++)
      {
         send_j[i1] = first_col + A_diag_j[k];
         if (want_data)
         {
            send_a[i1] = A_diag_a[k];
         }
         i1++;
      }
      if (num_procs > 1)
      {
         for (k = A_offd_i[j]; k < A_offd_i[j+1]; k++)
         {
            send_j[i1] = col_map_offd_A[A_offd_j[k]];
            if (want_data)
            {
               send_a[i1] = A_offd_a[k];
            }
            i1++;
         }
      }

      hypre_assert(send_i_offset[i+1] == i1);
   }

   /* finish the above communication: send_i/recv_i */
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* adjust recv_i to ptrs (row lengths -> CSR row pointers) */
   for (i = 1; i <= num_rows_recv; i++)
   {
      recv_i[i] += recv_i[i-1];
   }
   num_nnz_recv = recv_i[num_rows_recv];
   recv_j = hypre_CTAlloc(HYPRE_BigInt, num_nnz_recv, HYPRE_MEMORY_HOST);
   if (want_data)
   {
recv_a = hypre_CTAlloc(HYPRE_Complex, num_nnz_recv, HYPRE_MEMORY_HOST);
   }
   recv_jstarts = hypre_CTAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
   for (i = 1; i <= num_recvs; i++)
   {
      j = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i);
      recv_jstarts[i] = recv_i[j];
   }

   /* ready to send and recv: create a communication package for data */
   comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm         (comm_pkg_j) = comm;
   hypre_ParCSRCommPkgNumSends     (comm_pkg_j) = num_sends;
   hypre_ParCSRCommPkgSendProcs    (comm_pkg_j) = hypre_ParCSRCommPkgSendProcs(comm_pkg);
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = send_jstarts;
   hypre_ParCSRCommPkgNumRecvs     (comm_pkg_j) = num_recvs;
   hypre_ParCSRCommPkgRecvProcs    (comm_pkg_j) = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
   hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = recv_jstarts;

   /* init communication */
   /* ja */
   comm_handle_j = hypre_ParCSRCommHandleCreate(21, comm_pkg_j, send_j, recv_j);
   if (want_data)
   {
      /* a */
      comm_handle_a = hypre_ParCSRCommHandleCreate( 1, comm_pkg_j, send_a, recv_a);
   }
   else
   {
      comm_handle_a = NULL;
   }

   /* create A_ext: receive buffers become the CSR arrays directly */
   A_ext = hypre_CSRMatrixCreate(num_rows_recv, hypre_ParCSRMatrixGlobalNumCols(A), num_nnz_recv);
   hypre_CSRMatrixMemoryLocation(A_ext) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixI   (A_ext) = recv_i;
   hypre_CSRMatrixBigJ(A_ext) = recv_j;
   hypre_CSRMatrixData(A_ext) = recv_a;

   /* output: pack the in-flight handles and ownership into the request */
   vrequest = hypre_TAlloc(void *, 4, HYPRE_MEMORY_HOST);
   vrequest[0] = (void *) comm_handle_j;
   vrequest[1] = (void *) comm_handle_a;
   vrequest[2] = (void *) A_ext;
   vrequest[3] = (void *) comm_pkg_j;

   *request_ptr = (void *) vrequest;

   /* free */
   hypre_TFree(send_i, HYPRE_MEMORY_HOST);
   hypre_TFree(send_i_offset, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/**
 * Finish the exchange started by hypre_ParcsrGetExternalRowsInit:
 * waits on the outstanding column-index (and optional data) communication,
 * frees the send buffers and the temporary communication package, and
 * returns the received external rows as a CSRMatrix (caller owns it).
 */
hypre_CSRMatrix*
hypre_ParcsrGetExternalRowsWait(void *vrequest)
{
   void **request = (void **) vrequest;

   hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0];
   hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1];
   hypre_CSRMatrix        *A_ext         = (hypre_CSRMatrix *)        request[2];
   hypre_ParCSRCommPkg    *comm_pkg_j    = (hypre_ParCSRCommPkg *)    request[3];

   HYPRE_BigInt *send_j = (HYPRE_BigInt *) hypre_ParCSRCommHandleSendData(comm_handle_j);

   if (comm_handle_a)
   {
      HYPRE_Complex *send_a = (HYPRE_Complex *) hypre_ParCSRCommHandleSendData(comm_handle_a);
      /* destroy waits for the data exchange to complete */
      hypre_ParCSRCommHandleDestroy(comm_handle_a);
      hypre_TFree(send_a, HYPRE_MEMORY_HOST);
   }

   hypre_ParCSRCommHandleDestroy(comm_handle_j);
   hypre_TFree(send_j, HYPRE_MEMORY_HOST);

   /* comm_pkg_j owns only its start arrays; the proc lists belong to the
    * original comm_pkg and are not freed here */
   hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST);

   hypre_TFree(request, HYPRE_MEMORY_HOST);

   return A_ext;
}

/* C = alpha * A + beta * B
 * A and B are assumed to have the same row and column partitionings */
HYPRE_Int
hypre_ParcsrAdd( HYPRE_Complex alpha,
                 hypre_ParCSRMatrix *A,
                 HYPRE_Complex beta,
                 hypre_ParCSRMatrix *B,
                 hypre_ParCSRMatrix **Cout )
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_Int num_procs, my_id;
   hypre_MPI_Comm_rank(comm, &my_id);
   hypre_MPI_Comm_size(comm, &num_procs);

   HYPRE_Int i, j;

   /* diag part of A */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   /* off-diag part of A */
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
   /* map from A's offd columns to C's merged offd columns */
   HYPRE_Int *A2C_offd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

   HYPRE_BigInt nrow_global = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt ncol_global = hypre_ParCSRMatrixGlobalNumCols(A);
   HYPRE_Int nrow_local =
hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int ncol_local = hypre_CSRMatrixNumCols(A_diag);

   HYPRE_Int nnz_diag_A = A_diag_i[nrow_local];
   HYPRE_Int nnz_offd_A = A_offd_i[nrow_local];

   /* diag part of B */
   hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
   HYPRE_Complex *B_diag_a = hypre_CSRMatrixData(B_diag);
   HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag);
   HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag);
   /* off-diag part of B */
   hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
   HYPRE_Complex *B_offd_a = hypre_CSRMatrixData(B_offd);
   HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd);
   HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd);
   HYPRE_Int num_cols_B_offd = hypre_CSRMatrixNumCols(B_offd);
   HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);
   /* map from B's offd columns to C's merged offd columns */
   HYPRE_Int *B2C_offd = hypre_TAlloc(HYPRE_Int, num_cols_B_offd, HYPRE_MEMORY_HOST);

   /* A and B must share global sizes and local partitioning */
   hypre_assert(nrow_global == hypre_ParCSRMatrixGlobalNumRows(B));
   hypre_assert(ncol_global == hypre_ParCSRMatrixGlobalNumCols(B));
   hypre_assert(nrow_local == hypre_CSRMatrixNumRows(B_diag));
   hypre_assert(ncol_local == hypre_CSRMatrixNumCols(B_diag));

   HYPRE_Int nnz_diag_B = B_diag_i[nrow_local];
   HYPRE_Int nnz_offd_B = B_offd_i[nrow_local];

   HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A);
   HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B);

   /* RL: TODO cannot guarantee, maybe should never assert
   hypre_assert(memory_location_A == memory_location_B);
   */

   /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
    * not sure if this is the right thing to do.
    * Also, need something like this in other places
    * TODO */
   HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);

   /* C */
   hypre_ParCSRMatrix *C;
   HYPRE_BigInt *row_starts_C, *col_starts_C;
   hypre_CSRMatrix *C_diag;
   hypre_CSRMatrix *C_offd;
   /* upper bound; tightened by hypre_union2 below */
   HYPRE_Int num_cols_C_offd = num_cols_A_offd + num_cols_B_offd;
   HYPRE_BigInt *col_map_offd_C = hypre_TAlloc(HYPRE_BigInt, num_cols_C_offd, HYPRE_MEMORY_HOST);
   /* nnz(C) <= nnz(A) + nnz(B) */
   HYPRE_Int nnz_diag_C_alloc = nnz_diag_A + nnz_diag_B;
   HYPRE_Int nnz_offd_C_alloc = nnz_offd_A + nnz_offd_B;
   HYPRE_Int nnz_diag_C = 0, nnz_offd_C = 0;

   HYPRE_Int *C_diag_i = hypre_CTAlloc(HYPRE_Int, nrow_local + 1, memory_location_C);
   HYPRE_Int *C_diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag_C_alloc, memory_location_C);
   HYPRE_Complex *C_diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag_C_alloc, memory_location_C);
   HYPRE_Int *C_offd_i = hypre_CTAlloc(HYPRE_Int, nrow_local + 1, memory_location_C);
   HYPRE_Int *C_offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd_C_alloc, memory_location_C);
   HYPRE_Complex *C_offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd_C_alloc, memory_location_C);

   /* merge the two offd column maps; A2C_offd/B2C_offd get the renumbering */
   hypre_union2( num_cols_A_offd, col_map_offd_A,
                 num_cols_B_offd, col_map_offd_B,
                 &num_cols_C_offd, col_map_offd_C,
                 A2C_offd, B2C_offd );

   HYPRE_Int *marker_diag = hypre_TAlloc(HYPRE_Int, ncol_local, HYPRE_MEMORY_HOST);
   HYPRE_Int *marker_offd = hypre_TAlloc(HYPRE_Int, num_cols_C_offd, HYPRE_MEMORY_HOST);
   for (i = 0; i < ncol_local; i++)
   {
      marker_diag[i] = -1;
   }
   for (i = 0; i < num_cols_C_offd; i++)
   {
      marker_offd[i] = -1;
   }

   /* main loop for each row i */
   for (i = 0; i < nrow_local; i++)
   {
      HYPRE_Int diag_i_start = nnz_diag_C;
      HYPRE_Int offd_i_start = nnz_offd_C;

      for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
      {
         HYPRE_Int col = A_diag_j[j];
         HYPRE_Complex val = A_diag_a[j];
         if (marker_diag[col] < diag_i_start)
         {
            /* this col has not been seen before, create new entry */
            marker_diag[col] = nnz_diag_C;
            C_diag_j[nnz_diag_C] = col;
            C_diag_a[nnz_diag_C] = alpha * val;
            nnz_diag_C ++;
         }
         else
         {
            /* this should not happen: A's row has a duplicate column */
            hypre_printf("hypre warning: invalid ParCSR matrix %s %s %d\n",
                         __FILE__, __func__, __LINE__);
         }
      }

      for (j = B_diag_i[i]; j < B_diag_i[i+1]; j++)
      {
         HYPRE_Int col = B_diag_j[j];
         HYPRE_Complex val = B_diag_a[j];
         if (marker_diag[col] < diag_i_start /*&& hypre_abs(val) > 0.0*/)
         {
            /* this col has not been seen before, create new entry */
            marker_diag[col] = nnz_diag_C;
            C_diag_j[nnz_diag_C] = col;
            C_diag_a[nnz_diag_C] = beta * val;
            nnz_diag_C ++;
         }
         else
         {
            /* existing entry, update */
            HYPRE_Int p = marker_diag[col];

            hypre_assert(C_diag_j[p] == col);

            C_diag_a[p] += beta * val;
         }
      }

      C_diag_i[i+1] = nnz_diag_C;

      if (num_procs <= 1)
      {
         continue;
      }

      for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
      {
         HYPRE_Int colA = A_offd_j[j];
         HYPRE_Int colC = A2C_offd[colA];
         HYPRE_Complex val = A_offd_a[j];
         if (marker_offd[colC] < offd_i_start)
         {
            /* this col has not been seen before, create new entry */
            marker_offd[colC] = nnz_offd_C;
            C_offd_j[nnz_offd_C] = colC;
            C_offd_a[nnz_offd_C] = alpha * val;
            nnz_offd_C ++;
         }
         else
         {
            /* this should not happen: A's row has a duplicate column */
            hypre_printf("hypre warning: invalid ParCSR matrix %s %s %d\n",
                         __FILE__, __func__, __LINE__);
         }
      }

      for (j = B_offd_i[i]; j < B_offd_i[i+1]; j++)
      {
         HYPRE_Int colB = B_offd_j[j];
         HYPRE_Int colC = B2C_offd[colB];
         HYPRE_Complex val = B_offd_a[j];
         if (marker_offd[colC] < offd_i_start /*&& hypre_abs(val) > 0.0*/)
         {
            /* this col has not been seen before, create new entry */
            marker_offd[colC] = nnz_offd_C;
            C_offd_j[nnz_offd_C] = colC;
            C_offd_a[nnz_offd_C] = beta * val;
            nnz_offd_C ++;
         }
         else
         {
            /* existing entry, update */
            HYPRE_Int p = marker_offd[colC];

            hypre_assert(C_offd_j[p] == colC);

            C_offd_a[p] += beta * val;
         }
      }

      C_offd_i[i+1] = nnz_offd_C;
   }

#ifdef HYPRE_NO_GLOBAL_PARTITION
   j = 2;
#else
   j = num_procs + 1;
#endif
   row_starts_C = hypre_TAlloc(HYPRE_BigInt, j, HYPRE_MEMORY_HOST);
   col_starts_C = hypre_TAlloc(HYPRE_BigInt, j, HYPRE_MEMORY_HOST);
   memcpy(row_starts_C, hypre_ParCSRMatrixRowStarts(A), j*sizeof(HYPRE_BigInt));
/* NOTE(review): this span begins mid-function -- the statements below are the
 * tail of a routine started above this chunk that assembles a ParCSR matrix
 * C from already-built diag/offd CSR arrays; two complete helper routines
 * and the head of hypre_ParCSRMatrixExtractSubmatrixFC follow. */
   /* copy A's column partitioning into C's (j was set above to the
    * partition-array length: 2 or num_procs+1) */
   memcpy(col_starts_C, hypre_ParCSRMatrixColStarts(A), j*sizeof(HYPRE_BigInt));

   /* Now, we should have everything of Parcsr matrix C */
   C = hypre_ParCSRMatrixCreate(comm,
                                nrow_global,
                                ncol_global,
                                row_starts_C,
                                col_starts_C,
                                num_cols_C_offd,
                                nnz_diag_C,
                                nnz_offd_C);

   /* hand the assembled diag arrays to C (C takes ownership) */
   C_diag = hypre_ParCSRMatrixDiag(C);
   hypre_CSRMatrixData(C_diag) = C_diag_a;
   hypre_CSRMatrixI(C_diag) = C_diag_i;
   hypre_CSRMatrixJ(C_diag) = C_diag_j;
   hypre_CSRMatrixMemoryLocation(C_diag) = memory_location_C;

   /* hand the assembled offd arrays to C */
   C_offd = hypre_ParCSRMatrixOffd(C);
   hypre_CSRMatrixData(C_offd) = C_offd_a;
   hypre_CSRMatrixI(C_offd) = C_offd_i;
   hypre_CSRMatrixJ(C_offd) = C_offd_j;
   hypre_CSRMatrixMemoryLocation(C_offd) = memory_location_C;

   hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C;

   hypre_ParCSRMatrixSetNumNonzeros(C);
   hypre_ParCSRMatrixDNumNonzeros(C) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(C);

   /* create CommPkg of C */
   hypre_MatvecCommPkgCreate(C);

   *Cout = C;

   /* done: release the host-side scratch maps/markers built above */
   hypre_TFree(A2C_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(B2C_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(marker_diag, HYPRE_MEMORY_HOST);
   hypre_TFree(marker_offd, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/* Frobenius norm of a ParCSR matrix: sqrt of the global sum of squares of
 * all stored entries. hypre_CSRMatrixFnorm returns the local norm of each
 * piece, so the two pieces are squared before the MPI_SUM reduction. */
HYPRE_Real
hypre_ParCSRMatrixFnorm( hypre_ParCSRMatrix *A )
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_Real f_diag, f_offd, local_result, result;

   f_diag = hypre_CSRMatrixFnorm(hypre_ParCSRMatrixDiag(A));
   f_offd = hypre_CSRMatrixFnorm(hypre_ParCSRMatrixOffd(A));
   local_result = f_diag * f_diag + f_offd * f_offd;

   hypre_MPI_Allreduce(&local_result, &result, 1, HYPRE_MPI_REAL,
                       hypre_MPI_SUM, comm);

   return sqrt(result);
}

/* Start the (asynchronous) exchange of external rows: each rank sends the
 * rows of B_ext back along the reversed communication pattern of comm_pkg_A
 * and receives the corresponding rows into a new host CSR matrix B_int.
 *
 * B_ext        : local CSR matrix of external rows (may be NULL on ranks
 *                with nothing to send; treated as 0 rows / 0 cols)
 * comm_pkg_A   : communication package whose send/recv roles are REVERSED
 *                for this exchange
 * request_ptr  : out; opaque request to pass to
 *                hypre_ExchangeExternalRowsWait, which returns B_int
 *
 * Returns hypre_error_flag. */
HYPRE_Int
hypre_ExchangeExternalRowsInit( hypre_CSRMatrix *B_ext,
                                hypre_ParCSRCommPkg *comm_pkg_A,
                                void **request_ptr)
{
   MPI_Comm comm = hypre_ParCSRCommPkgComm(comm_pkg_A);
   HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A);
   HYPRE_Int *recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A);
   HYPRE_Int *recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A);
   HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg_A);
   HYPRE_Int *send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg_A);
   HYPRE_Int *send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A);
   HYPRE_Int num_elmts_send = send_map_starts[num_sends];
   HYPRE_Int num_elmts_recv = recv_vec_starts[num_recvs];

   /* B_ext may be NULL: fall back to empty-matrix views */
   HYPRE_Int *B_ext_i = B_ext ? hypre_CSRMatrixI(B_ext) : NULL;
   HYPRE_BigInt *B_ext_j = B_ext ? hypre_CSRMatrixBigJ(B_ext) : NULL;
   HYPRE_Complex *B_ext_data = B_ext ? hypre_CSRMatrixData(B_ext) : NULL;
   HYPRE_Int B_ext_ncols = B_ext ? hypre_CSRMatrixNumCols(B_ext) : 0;
   HYPRE_Int B_ext_nrows = B_ext ? hypre_CSRMatrixNumRows(B_ext) : 0;
   HYPRE_Int *B_ext_rownnz = hypre_CTAlloc(HYPRE_Int, B_ext_nrows, HYPRE_MEMORY_HOST);

   hypre_assert(num_elmts_recv == B_ext_nrows);

   /* output matrix */
   hypre_CSRMatrix *B_int;
   HYPRE_Int B_int_nrows = num_elmts_send;
   HYPRE_Int B_int_ncols = B_ext_ncols;
   HYPRE_Int *B_int_i = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_HOST);
   HYPRE_BigInt *B_int_j = NULL;
   HYPRE_Complex *B_int_data = NULL;
   HYPRE_Int B_int_nnz;

   hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a;
   hypre_ParCSRCommPkg *comm_pkg_j;
   HYPRE_Int *jdata_recv_vec_starts;
   HYPRE_Int *jdata_send_map_starts;
   HYPRE_Int i;
   HYPRE_Int num_procs;
   void **vrequest;

   hypre_MPI_Comm_size(comm, &num_procs);

   jdata_send_map_starts = hypre_TAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);

   /*--------------------------------------------------------------------------
    * B_ext_rownnz contains the number of elements of row j
    * (to be determined through send_map_elmnts on the receiving end)
    *--------------------------------------------------------------------------*/
   for (i = 0; i < B_ext_nrows; i++)
   {
      B_ext_rownnz[i] = B_ext_i[i+1] - B_ext_i[i];
   }

   /*--------------------------------------------------------------------------
    * initialize communication: send/recv the row nnz
    * (note the use of comm_pkg_A, mode 12, as in transpose matvec
    *--------------------------------------------------------------------------*/
   comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg_A, B_ext_rownnz, B_int_i + 1);

   /* per-destination starts into B_ext's j/data arrays */
   jdata_recv_vec_starts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
   jdata_recv_vec_starts[0] = 0;
   for (i = 1; i <= num_recvs; i++)
   {
      jdata_recv_vec_starts[i] = B_ext_i[recv_vec_starts[i]];
   }

   /* build a "reversed" comm pkg: what comm_pkg_A receives, comm_pkg_j sends */
   comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm(comm_pkg_j) = comm;
   hypre_ParCSRCommPkgNumSends(comm_pkg_j) = num_recvs;
   hypre_ParCSRCommPkgNumRecvs(comm_pkg_j) = num_sends;
   hypre_ParCSRCommPkgSendProcs(comm_pkg_j) = recv_procs;
   hypre_ParCSRCommPkgRecvProcs(comm_pkg_j) = send_procs;

   /* wait for the row-nnz exchange to finish before prefix-summing B_int_i */
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /*--------------------------------------------------------------------------
    * compute B_int: row nnz to row ptrs
    *--------------------------------------------------------------------------*/
   B_int_i[0] = 0;
   for (i = 1; i <= B_int_nrows; i++)
   {
      B_int_i[i] += B_int_i[i-1];
   }

   B_int_nnz = B_int_i[B_int_nrows];

   B_int_j = hypre_TAlloc(HYPRE_BigInt, B_int_nnz, HYPRE_MEMORY_HOST);
   B_int_data = hypre_TAlloc(HYPRE_Complex, B_int_nnz, HYPRE_MEMORY_HOST);

   for (i = 0; i <= num_sends; i++)
   {
      jdata_send_map_starts[i] = B_int_i[send_map_starts[i]];
   }

   /* note the order of send/recv is reversed */
   hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = jdata_send_map_starts;
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = jdata_recv_vec_starts;

   /* send/recv CSR rows: mode 1 for HYPRE_Complex data, 21 for HYPRE_BigInt */
   comm_handle_a = hypre_ParCSRCommHandleCreate( 1, comm_pkg_j, B_ext_data, B_int_data);
   comm_handle_j = hypre_ParCSRCommHandleCreate(21, comm_pkg_j, B_ext_j, B_int_j);

   /* create CSR */
   B_int = hypre_CSRMatrixCreate(B_int_nrows, B_int_ncols, B_int_nnz);
   hypre_CSRMatrixMemoryLocation(B_int) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixI(B_int) = B_int_i;
   hypre_CSRMatrixBigJ(B_int) = B_int_j;
   hypre_CSRMatrixData(B_int) = B_int_data;

   /* output: slot layout must match hypre_ExchangeExternalRowsWait */
   vrequest = hypre_TAlloc(void *, 4, HYPRE_MEMORY_HOST);
   vrequest[0] = (void *) comm_handle_j;
   vrequest[1] = (void *) comm_handle_a;
   vrequest[2] = (void *) B_int;
   vrequest[3] = (void *) comm_pkg_j;

   *request_ptr = (void *) vrequest;

   hypre_TFree(B_ext_rownnz, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/* Finish the exchange started by hypre_ExchangeExternalRowsInit: wait on the
 * two in-flight transfers, free the temporary comm pkg (including the two
 * jdata start arrays it took over), and return the received matrix B_int.
 * The caller owns B_int and is responsible for destroying it. */
hypre_CSRMatrix*
hypre_ExchangeExternalRowsWait(void *vrequest)
{
   void **request = (void **) vrequest;

   hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0];
   hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1];
   hypre_CSRMatrix *B_int = (hypre_CSRMatrix *) request[2];
   hypre_ParCSRCommPkg *comm_pkg_j = (hypre_ParCSRCommPkg *) request[3];

   /* communication done */
   hypre_ParCSRCommHandleDestroy(comm_handle_a);
   hypre_ParCSRCommHandleDestroy(comm_handle_j);

   /* comm_pkg_j only borrowed procs arrays from comm_pkg_A; the two starts
    * arrays were allocated by Init and are freed here */
   hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST);
   hypre_TFree(request, HYPRE_MEMORY_HOST);

   return B_int;
}

/* -----------------------------------------------------------------------------
 * extract submatrix A_{FF}, A_{FC}, A_{CF} or A_{CC}
 * char job[2] = "FF", "FC", "CF" or "CC"
 * ----------------------------------------------------------------------------- */
HYPRE_Int
hypre_ParCSRMatrixExtractSubmatrixFC( hypre_ParCSRMatrix *A,
                                      HYPRE_Int *CF_marker,
                                      HYPRE_BigInt *cpts_starts_in,
                                      const char *job,
                                      hypre_ParCSRMatrix **B_ptr,
                                      HYPRE_Real strength_thresh)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   /* diag part of A */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   /* off-diag part of A */
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i =
hypre_CSRMatrixI(A_offd); /* NOTE(review): continues the declaration begun on the previous line */
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   //HYPRE_Int *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);

   hypre_ParCSRMatrix *B;
   hypre_CSRMatrix *B_diag, *B_offd;
   HYPRE_Real *B_maxel_row;
   HYPRE_Int *B_diag_i, *B_diag_j, *B_offd_i, *B_offd_j;
   HYPRE_Complex *B_diag_a, *B_offd_a;
   HYPRE_Int num_cols_B_offd;
   HYPRE_BigInt *col_map_offd_B;
   HYPRE_Int i, j, k, k1, k2;
   HYPRE_BigInt B_nrow_global, B_ncol_global;
   HYPRE_Int A_nlocal, B_nrow_local, B_ncol_local, B_nnz_diag, B_nnz_offd;
   HYPRE_BigInt total_global_fpts, total_global_cpts, *fpts_starts, *cpts_starts;
   HYPRE_Int nf_local, nc_local;
   HYPRE_Int row_set, col_set;
   HYPRE_BigInt *B_row_starts, *B_col_starts, B_first_col;
   HYPRE_Int my_id, num_procs, *sub_idx_diag, *sub_idx_offd;
   HYPRE_Int num_sends, *send_buf_data;

   /* MPI size and rank*/
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* decode job: -1 selects F points, +1 selects C points */
   row_set = job[0] == 'F' ? -1 : 1;
   col_set = job[1] == 'F' ? -1 : 1;

   A_nlocal = hypre_CSRMatrixNumRows(A_diag);

   /*-------------- global number of C points and local C points
    *               assuming cpts_starts is given */
   if (row_set == 1 || col_set == 1)
   {
      /* copy cpts_starts first */
      HYPRE_Int len;
#ifdef HYPRE_NO_GLOBAL_PARTITION
      len = 2;
#else
      len = num_procs + 1;
#endif
      cpts_starts = hypre_TAlloc(HYPRE_BigInt, len, HYPRE_MEMORY_HOST);
      memcpy(cpts_starts, cpts_starts_in, len*sizeof(HYPRE_BigInt));

#ifdef HYPRE_NO_GLOBAL_PARTITION
      if (my_id == (num_procs -1))
      {
         total_global_cpts = cpts_starts[1];
      }
      /* NOTE(review): total_global_cpts is HYPRE_BigInt but is broadcast as
       * HYPRE_MPI_INT -- datatype/buffer mismatch; presumably this should be
       * HYPRE_MPI_BIG_INT. Verify against upstream hypre. */
      hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
      nc_local = (HYPRE_Int)(cpts_starts[1] - cpts_starts[0]);
#else
      total_global_cpts = cpts_starts[num_procs];
      nc_local = (HYPRE_Int)(cpts_starts[my_id+1] - cpts_starts[my_id]);
#endif
   }

   /*-------------- global number of F points, local F points, and F starts */
   if (row_set == -1 || col_set == -1)
   {
      /* count local F points (CF_marker < 0) */
      nf_local = 0;
      for (i = 0; i < A_nlocal; i++)
      {
         if (CF_marker[i] < 0)
         {
            nf_local++;
         }
      }
#ifdef HYPRE_NO_GLOBAL_PARTITION
      fpts_starts = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
      /* NOTE(review): nf_local is HYPRE_Int but is scanned with
       * HYPRE_MPI_BIG_INT into a HYPRE_BigInt buffer -- send-buffer/datatype
       * mismatch; confirm against upstream hypre. */
      hypre_MPI_Scan(&nf_local, fpts_starts+1, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
      fpts_starts[0] = fpts_starts[1] - nf_local;
      if (my_id == num_procs - 1)
      {
         total_global_fpts = fpts_starts[1];
      }
      /* NOTE(review): same HYPRE_MPI_INT vs HYPRE_BigInt mismatch as the
       * total_global_cpts broadcast above. */
      hypre_MPI_Bcast(&total_global_fpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
      fpts_starts = hypre_TAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
      /* NOTE(review): send type HYPRE_MPI_BIG_INT on an HYPRE_Int buffer vs
       * recv type HYPRE_MPI_INT into a HYPRE_BigInt array -- mismatched on
       * both sides; also fpts_starts[0] is never initialized on this branch
       * (hypre_TAlloc does not zero). Verify against upstream hypre. */
      hypre_MPI_Allgather(&nf_local, 1, HYPRE_MPI_BIG_INT, &fpts_starts[1], 1, HYPRE_MPI_INT, comm);
      for (i = 2; i < num_procs+1; i++)
      {
         fpts_starts[i] += fpts_starts[i-1];
      }
      total_global_fpts = fpts_starts[num_procs];
#endif
   }

   /* pick dimensions and row/col partitions for the requested submatrix */
   if (row_set == -1 && col_set == -1)
   {
      /* FF */
      B_nrow_local = nf_local;
      B_ncol_local = nf_local;
      B_nrow_global = total_global_fpts;
      B_ncol_global = total_global_fpts;
      B_row_starts = B_col_starts = fpts_starts;
   }
   else if (row_set == -1 && col_set == 1)
   {
      /* FC */
      B_nrow_local = nf_local;
      B_ncol_local = nc_local;
      B_nrow_global = total_global_fpts;
      B_ncol_global = total_global_cpts;
      B_row_starts = fpts_starts;
      B_col_starts = cpts_starts;
   }
   else if (row_set == 1 && col_set == -1)
   {
      /* CF */
      B_nrow_local = nc_local;
      B_ncol_local = nf_local;
      B_nrow_global = total_global_cpts;
      B_ncol_global = total_global_fpts;
      B_row_starts = cpts_starts;
      B_col_starts = fpts_starts;
   }
   else
   {
      /* CC */
      B_nrow_local = nc_local;
      B_ncol_local = nc_local;
      B_nrow_global = total_global_cpts;
      B_ncol_global = total_global_cpts;
      B_row_starts = B_col_starts = cpts_starts;
   }

   /* global index of my first col */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   B_first_col = B_col_starts[0];
#else
   B_first_col = B_col_starts[my_id];
#endif

   /* sub_idx_diag: [local] mapping from F+C to F/C, if not selected, be -1 */
   sub_idx_diag = hypre_TAlloc(HYPRE_Int, A_nlocal, HYPRE_MEMORY_HOST);
   for (i = 0, k = 0; i < A_nlocal; i++)
   {
      HYPRE_Int CF_i = CF_marker[i] > 0 ? 1 : -1;
      if (CF_i == col_set)
      {
         sub_idx_diag[i] = k++;
      }
      else
      {
         sub_idx_diag[i] = -1;
      }
   }
   hypre_assert(k == B_ncol_local);

   /* exchange the new (global) column indices so each rank can relabel its
    * off-diagonal columns */
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   send_buf_data = hypre_TAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
   k = 0;
   for (i = 0; i < num_sends; i++)
   {
      /* start pos of elements sent to send_proc[i] */
      HYPRE_Int si = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      HYPRE_Int ei = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1);
      /* loop through all elems to send_proc[i] */
      for (j = si; j < ei; j++)
      {
         /* j1: local idx */
         HYPRE_Int j1 = sub_idx_diag[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
         if (j1 != -1)
         {
            /* adjust j1 to B global idx
             * NOTE(review): j1 is HYPRE_Int, B_first_col is HYPRE_BigInt --
             * the sum is narrowed back to HYPRE_Int here and sent via the
             * integer (mode 11) exchange below; could overflow for very
             * large global problems. Verify intended. */
            j1 += B_first_col;
         }
         send_buf_data[k++] = j1;
      }
   }
   hypre_assert(k == hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));

   /* recv buffer */
   sub_idx_offd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   /* create a handle to start communication. 11: for integer */
   comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_buf_data, sub_idx_offd);
   /* destroy the handle to finish communication */
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* count selected offd columns and compress them; after this loop
    * sub_idx_offd maps A-offd col -> B-offd col (or -1), and col_map_offd_B
    * holds the received global B column indices */
   for (i = 0, num_cols_B_offd = 0; i < num_cols_A_offd; i++)
   {
      if (sub_idx_offd[i] != -1)
      {
         num_cols_B_offd ++;
      }
   }
   col_map_offd_B = hypre_TAlloc(HYPRE_BigInt, num_cols_B_offd, HYPRE_MEMORY_HOST);
   for (i = 0, k = 0; i < num_cols_A_offd; i++)
   {
      if (sub_idx_offd[i] != -1)
      {
         col_map_offd_B[k] = sub_idx_offd[i];
         sub_idx_offd[i] = k++;
      }
   }
   hypre_assert(k == num_cols_B_offd);

   /* count nnz and set ia */
   B_nnz_diag = B_nnz_offd = 0;
   B_maxel_row = hypre_TAlloc(HYPRE_Real, B_nrow_local, HYPRE_MEMORY_HOST);
   B_diag_i = hypre_TAlloc(HYPRE_Int, B_nrow_local+1, HYPRE_MEMORY_HOST);
   B_offd_i = hypre_TAlloc(HYPRE_Int, B_nrow_local+1, HYPRE_MEMORY_HOST);
   B_diag_i[0] = B_offd_i[0] = 0;

   for (i = 0, k = 0; i < A_nlocal; i++)
   {
      HYPRE_Int CF_i = CF_marker[i] > 0 ? 1 : -1;
      if (CF_i != row_set)
      {
         continue;
      }
      k++;

      // Get max abs-value element of this row
      HYPRE_Real temp_max = 0;
      if (strength_thresh > 0)
      {
         /* diag scan starts at A_diag_i[i]+1: the first diag entry is the
          * diagonal element, which is excluded from the max */
         for (j = A_diag_i[i]+1; j < A_diag_i[i+1]; j++)
         {
            if (hypre_cabs(A_diag_a[j]) > temp_max)
            {
               temp_max = hypre_cabs(A_diag_a[j]);
            }
         }
         for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
         {
            if (hypre_cabs(A_offd_a[j]) > temp_max)
            {
               temp_max = hypre_cabs(A_offd_a[j]);
            }
         }
      }
      B_maxel_row[k-1] = temp_max;

      // add one for diagonal element
      j = A_diag_i[i];
      if (sub_idx_diag[A_diag_j[j]] != -1)
      {
         B_nnz_diag++;
      }

      // Count nnzs larger than tolerance times max row element
      for (j = A_diag_i[i]+1; j < A_diag_i[i+1]; j++)
      {
         if ( (sub_idx_diag[A_diag_j[j]] != -1) && (hypre_cabs(A_diag_a[j]) > (strength_thresh*temp_max)) )
         {
            B_nnz_diag++;
         }
      }
      for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
      {
         if ( (sub_idx_offd[A_offd_j[j]] != -1) && (hypre_cabs(A_offd_a[j]) > (strength_thresh*temp_max)) )
         {
            B_nnz_offd++;
         }
      }
      B_diag_i[k] = B_nnz_diag;
      B_offd_i[k] = B_nnz_offd;
   }
   hypre_assert(k == B_nrow_local);

   B_diag_j = hypre_TAlloc(HYPRE_Int, B_nnz_diag, HYPRE_MEMORY_HOST);
   B_diag_a = hypre_TAlloc(HYPRE_Complex, B_nnz_diag, HYPRE_MEMORY_HOST);
   B_offd_j = hypre_TAlloc(HYPRE_Int, B_nnz_offd, HYPRE_MEMORY_HOST);
   B_offd_a = hypre_TAlloc(HYPRE_Complex, B_nnz_offd, HYPRE_MEMORY_HOST);

   /* second pass: fill entries kept by the same filter as the count pass
    * (the diagonal element, j == A_diag_i[i], is always kept) */
   for (i = 0, k=0, k1 = 0, k2 = 0; i < A_nlocal; i++)
   {
      HYPRE_Int CF_i = CF_marker[i] > 0 ? 1 : -1;
      if (CF_i != row_set)
      {
         continue;
      }
      HYPRE_Real maxel = B_maxel_row[k];
      k++;
      for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
      {
         HYPRE_Int j1 = sub_idx_diag[A_diag_j[j]];
         if ( (j1 != -1) && ( (hypre_cabs(A_diag_a[j]) > (strength_thresh*maxel)) || j==A_diag_i[i] ) )
         {
            B_diag_j[k1] = j1;
            B_diag_a[k1] = A_diag_a[j];
            k1++;
         }
      }
      for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
      {
         HYPRE_Int j1 = sub_idx_offd[A_offd_j[j]];
         if ((j1 != -1) && (hypre_cabs(A_offd_a[j]) > (strength_thresh*maxel)))
         {
            hypre_assert(j1 >= 0 && j1 < num_cols_B_offd);
            B_offd_j[k2] = j1;
            B_offd_a[k2] = A_offd_a[j];
            k2++;
         }
      }
   }
   hypre_assert(k1 == B_nnz_diag && k2 == B_nnz_offd);

   /* ready to create B = A(rowset, colset) */
   B = hypre_ParCSRMatrixCreate(comm,
                                B_nrow_global,
                                B_ncol_global,
                                B_row_starts,
                                B_col_starts,
                                num_cols_B_offd,
                                B_nnz_diag,
                                B_nnz_offd);

   /* hand the assembled arrays to B (B takes ownership) */
   B_diag = hypre_ParCSRMatrixDiag(B);
   hypre_CSRMatrixMemoryLocation(B_diag) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixData(B_diag) = B_diag_a;
   hypre_CSRMatrixI(B_diag) = B_diag_i;
   hypre_CSRMatrixJ(B_diag) = B_diag_j;

   B_offd = hypre_ParCSRMatrixOffd(B);
   hypre_CSRMatrixMemoryLocation(B_offd) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixData(B_offd) = B_offd_a;
   hypre_CSRMatrixI(B_offd) = B_offd_i;
   hypre_CSRMatrixJ(B_offd) = B_offd_j;

   hypre_ParCSRMatrixColMapOffd(B) = col_map_offd_B;

   hypre_ParCSRMatrixSetNumNonzeros(B);
   hypre_ParCSRMatrixDNumNonzeros(B) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(B);

   hypre_MatvecCommPkgCreate(B);

   *B_ptr = B;

   /* free scratch; B keeps the *_starts arrays and col_map_offd_B */
   hypre_TFree(B_maxel_row, HYPRE_MEMORY_HOST);
   hypre_TFree(send_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(sub_idx_diag, HYPRE_MEMORY_HOST);
   hypre_TFree(sub_idx_offd, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}
/* ===== begin file: convolutiondepthwise_5x5_pack4.h ===== */
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convdw5x5s1_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { #if __aarch64__ const int w = bottom_blob.w; #endif const int outw = top_blob.w; const int outh = top_blob.h; const int group = bottom_blob.c; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); float32x4_t _bias0 = bias ? 
vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f); const float* k0 = kernel.row(g); float* outptr0 = out.row(0); const Mat img0 = bottom_blob.channel(g); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); const float* r3 = img0.row(3); const float* r4 = img0.row(4); int i = 0; #if __aarch64__ float* outptr1 = out.row(1); const float* r5 = img0.row(5); for (; i + 1 < outh; i += 2) { int j = 0; for (; j + 3 < outw; j += 4) { float32x4_t _sum00 = _bias0; float32x4_t _sum01 = _bias0; float32x4_t _sum02 = _bias0; float32x4_t _sum03 = _bias0; float32x4_t _sum10 = _bias0; float32x4_t _sum11 = _bias0; float32x4_t _sum12 = _bias0; float32x4_t _sum13 = _bias0; float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _r05 = vld1q_f32(r0 + 20); float32x4_t _r06 = vld1q_f32(r0 + 24); float32x4_t _r07 = vld1q_f32(r0 + 28); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0 + 4); float32x4_t _k02 = vld1q_f32(k0 + 8); float32x4_t _k03 = vld1q_f32(k0 + 12); float32x4_t _k04 = vld1q_f32(k0 + 16); k0 += 20; _sum00 = vmlaq_f32(_sum00, _k00, _r00); _sum00 = vmlaq_f32(_sum00, _k01, _r01); _sum00 = vmlaq_f32(_sum00, _k02, _r02); _sum00 = vmlaq_f32(_sum00, _k03, _r03); _sum00 = vmlaq_f32(_sum00, _k04, _r04); _sum01 = vmlaq_f32(_sum01, _k00, _r01); _sum01 = vmlaq_f32(_sum01, _k01, _r02); _sum01 = vmlaq_f32(_sum01, _k02, _r03); _sum01 = vmlaq_f32(_sum01, _k03, _r04); _sum01 = vmlaq_f32(_sum01, _k04, _r05); _sum02 = vmlaq_f32(_sum02, _k00, _r02); _sum02 = vmlaq_f32(_sum02, _k01, _r03); _sum02 = vmlaq_f32(_sum02, _k02, _r04); _sum02 = vmlaq_f32(_sum02, _k03, _r05); _sum02 = vmlaq_f32(_sum02, _k04, _r06); _sum03 = vmlaq_f32(_sum03, _k00, _r03); _sum03 = vmlaq_f32(_sum03, _k01, _r04); _sum03 = vmlaq_f32(_sum03, _k02, _r05); _sum03 = vmlaq_f32(_sum03, _k03, _r06); _sum03 = vmlaq_f32(_sum03, 
_k04, _r07); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); float32x4_t _r13 = vld1q_f32(r1 + 12); float32x4_t _r14 = vld1q_f32(r1 + 16); float32x4_t _r15 = vld1q_f32(r1 + 20); float32x4_t _r16 = vld1q_f32(r1 + 24); float32x4_t _r17 = vld1q_f32(r1 + 28); float32x4_t _k10 = vld1q_f32(k0); float32x4_t _k11 = vld1q_f32(k0 + 4); float32x4_t _k12 = vld1q_f32(k0 + 8); float32x4_t _k13 = vld1q_f32(k0 + 12); float32x4_t _k14 = vld1q_f32(k0 + 16); k0 += 20; _sum10 = vmlaq_f32(_sum10, _k00, _r10); _sum10 = vmlaq_f32(_sum10, _k01, _r11); _sum10 = vmlaq_f32(_sum10, _k02, _r12); _sum10 = vmlaq_f32(_sum10, _k03, _r13); _sum10 = vmlaq_f32(_sum10, _k04, _r14); _sum11 = vmlaq_f32(_sum11, _k00, _r11); _sum11 = vmlaq_f32(_sum11, _k01, _r12); _sum11 = vmlaq_f32(_sum11, _k02, _r13); _sum11 = vmlaq_f32(_sum11, _k03, _r14); _sum11 = vmlaq_f32(_sum11, _k04, _r15); _sum12 = vmlaq_f32(_sum12, _k00, _r12); _sum12 = vmlaq_f32(_sum12, _k01, _r13); _sum12 = vmlaq_f32(_sum12, _k02, _r14); _sum12 = vmlaq_f32(_sum12, _k03, _r15); _sum12 = vmlaq_f32(_sum12, _k04, _r16); _sum13 = vmlaq_f32(_sum13, _k00, _r13); _sum13 = vmlaq_f32(_sum13, _k01, _r14); _sum13 = vmlaq_f32(_sum13, _k02, _r15); _sum13 = vmlaq_f32(_sum13, _k03, _r16); _sum13 = vmlaq_f32(_sum13, _k04, _r17); _sum00 = vmlaq_f32(_sum00, _k10, _r10); _sum00 = vmlaq_f32(_sum00, _k11, _r11); _sum00 = vmlaq_f32(_sum00, _k12, _r12); _sum00 = vmlaq_f32(_sum00, _k13, _r13); _sum00 = vmlaq_f32(_sum00, _k14, _r14); _sum01 = vmlaq_f32(_sum01, _k10, _r11); _sum01 = vmlaq_f32(_sum01, _k11, _r12); _sum01 = vmlaq_f32(_sum01, _k12, _r13); _sum01 = vmlaq_f32(_sum01, _k13, _r14); _sum01 = vmlaq_f32(_sum01, _k14, _r15); _sum02 = vmlaq_f32(_sum02, _k10, _r12); _sum02 = vmlaq_f32(_sum02, _k11, _r13); _sum02 = vmlaq_f32(_sum02, _k12, _r14); _sum02 = vmlaq_f32(_sum02, _k13, _r15); _sum02 = vmlaq_f32(_sum02, _k14, _r16); _sum03 = vmlaq_f32(_sum03, _k10, _r13); _sum03 = vmlaq_f32(_sum03, _k11, _r14); 
_sum03 = vmlaq_f32(_sum03, _k12, _r15); _sum03 = vmlaq_f32(_sum03, _k13, _r16); _sum03 = vmlaq_f32(_sum03, _k14, _r17); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); float32x4_t _r23 = vld1q_f32(r2 + 12); float32x4_t _r24 = vld1q_f32(r2 + 16); float32x4_t _r25 = vld1q_f32(r2 + 20); float32x4_t _r26 = vld1q_f32(r2 + 24); float32x4_t _r27 = vld1q_f32(r2 + 28); float32x4_t _k20 = vld1q_f32(k0); float32x4_t _k21 = vld1q_f32(k0 + 4); float32x4_t _k22 = vld1q_f32(k0 + 8); float32x4_t _k23 = vld1q_f32(k0 + 12); float32x4_t _k24 = vld1q_f32(k0 + 16); k0 += 20; _sum10 = vmlaq_f32(_sum10, _k10, _r20); _sum10 = vmlaq_f32(_sum10, _k11, _r21); _sum10 = vmlaq_f32(_sum10, _k12, _r22); _sum10 = vmlaq_f32(_sum10, _k13, _r23); _sum10 = vmlaq_f32(_sum10, _k14, _r24); _sum11 = vmlaq_f32(_sum11, _k10, _r21); _sum11 = vmlaq_f32(_sum11, _k11, _r22); _sum11 = vmlaq_f32(_sum11, _k12, _r23); _sum11 = vmlaq_f32(_sum11, _k13, _r24); _sum11 = vmlaq_f32(_sum11, _k14, _r25); _sum12 = vmlaq_f32(_sum12, _k10, _r22); _sum12 = vmlaq_f32(_sum12, _k11, _r23); _sum12 = vmlaq_f32(_sum12, _k12, _r24); _sum12 = vmlaq_f32(_sum12, _k13, _r25); _sum12 = vmlaq_f32(_sum12, _k14, _r26); _sum13 = vmlaq_f32(_sum13, _k10, _r23); _sum13 = vmlaq_f32(_sum13, _k11, _r24); _sum13 = vmlaq_f32(_sum13, _k12, _r25); _sum13 = vmlaq_f32(_sum13, _k13, _r26); _sum13 = vmlaq_f32(_sum13, _k14, _r27); _sum00 = vmlaq_f32(_sum00, _k20, _r20); _sum00 = vmlaq_f32(_sum00, _k21, _r21); _sum00 = vmlaq_f32(_sum00, _k22, _r22); _sum00 = vmlaq_f32(_sum00, _k23, _r23); _sum00 = vmlaq_f32(_sum00, _k24, _r24); _sum01 = vmlaq_f32(_sum01, _k20, _r21); _sum01 = vmlaq_f32(_sum01, _k21, _r22); _sum01 = vmlaq_f32(_sum01, _k22, _r23); _sum01 = vmlaq_f32(_sum01, _k23, _r24); _sum01 = vmlaq_f32(_sum01, _k24, _r25); _sum02 = vmlaq_f32(_sum02, _k20, _r22); _sum02 = vmlaq_f32(_sum02, _k21, _r23); _sum02 = vmlaq_f32(_sum02, _k22, _r24); _sum02 = vmlaq_f32(_sum02, _k23, _r25); _sum02 = 
vmlaq_f32(_sum02, _k24, _r26); _sum03 = vmlaq_f32(_sum03, _k20, _r23); _sum03 = vmlaq_f32(_sum03, _k21, _r24); _sum03 = vmlaq_f32(_sum03, _k22, _r25); _sum03 = vmlaq_f32(_sum03, _k23, _r26); _sum03 = vmlaq_f32(_sum03, _k24, _r27); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r31 = vld1q_f32(r3 + 4); float32x4_t _r32 = vld1q_f32(r3 + 8); float32x4_t _r33 = vld1q_f32(r3 + 12); float32x4_t _r34 = vld1q_f32(r3 + 16); float32x4_t _r35 = vld1q_f32(r3 + 20); float32x4_t _r36 = vld1q_f32(r3 + 24); float32x4_t _r37 = vld1q_f32(r3 + 28); float32x4_t _k30 = vld1q_f32(k0); float32x4_t _k31 = vld1q_f32(k0 + 4); float32x4_t _k32 = vld1q_f32(k0 + 8); float32x4_t _k33 = vld1q_f32(k0 + 12); float32x4_t _k34 = vld1q_f32(k0 + 16); k0 += 20; _sum10 = vmlaq_f32(_sum10, _k20, _r30); _sum10 = vmlaq_f32(_sum10, _k21, _r31); _sum10 = vmlaq_f32(_sum10, _k22, _r32); _sum10 = vmlaq_f32(_sum10, _k23, _r33); _sum10 = vmlaq_f32(_sum10, _k24, _r34); _sum11 = vmlaq_f32(_sum11, _k20, _r31); _sum11 = vmlaq_f32(_sum11, _k21, _r32); _sum11 = vmlaq_f32(_sum11, _k22, _r33); _sum11 = vmlaq_f32(_sum11, _k23, _r34); _sum11 = vmlaq_f32(_sum11, _k24, _r35); _sum12 = vmlaq_f32(_sum12, _k20, _r32); _sum12 = vmlaq_f32(_sum12, _k21, _r33); _sum12 = vmlaq_f32(_sum12, _k22, _r34); _sum12 = vmlaq_f32(_sum12, _k23, _r35); _sum12 = vmlaq_f32(_sum12, _k24, _r36); _sum13 = vmlaq_f32(_sum13, _k20, _r33); _sum13 = vmlaq_f32(_sum13, _k21, _r34); _sum13 = vmlaq_f32(_sum13, _k22, _r35); _sum13 = vmlaq_f32(_sum13, _k23, _r36); _sum13 = vmlaq_f32(_sum13, _k24, _r37); _sum00 = vmlaq_f32(_sum00, _k30, _r30); _sum00 = vmlaq_f32(_sum00, _k31, _r31); _sum00 = vmlaq_f32(_sum00, _k32, _r32); _sum00 = vmlaq_f32(_sum00, _k33, _r33); _sum00 = vmlaq_f32(_sum00, _k34, _r34); _sum01 = vmlaq_f32(_sum01, _k30, _r31); _sum01 = vmlaq_f32(_sum01, _k31, _r32); _sum01 = vmlaq_f32(_sum01, _k32, _r33); _sum01 = vmlaq_f32(_sum01, _k33, _r34); _sum01 = vmlaq_f32(_sum01, _k34, _r35); _sum02 = vmlaq_f32(_sum02, _k30, _r32); _sum02 = 
vmlaq_f32(_sum02, _k31, _r33); _sum02 = vmlaq_f32(_sum02, _k32, _r34); _sum02 = vmlaq_f32(_sum02, _k33, _r35); _sum02 = vmlaq_f32(_sum02, _k34, _r36); _sum03 = vmlaq_f32(_sum03, _k30, _r33); _sum03 = vmlaq_f32(_sum03, _k31, _r34); _sum03 = vmlaq_f32(_sum03, _k32, _r35); _sum03 = vmlaq_f32(_sum03, _k33, _r36); _sum03 = vmlaq_f32(_sum03, _k34, _r37); float32x4_t _r40 = vld1q_f32(r4); float32x4_t _r41 = vld1q_f32(r4 + 4); float32x4_t _r42 = vld1q_f32(r4 + 8); float32x4_t _r43 = vld1q_f32(r4 + 12); float32x4_t _r44 = vld1q_f32(r4 + 16); float32x4_t _r45 = vld1q_f32(r4 + 20); float32x4_t _r46 = vld1q_f32(r4 + 24); float32x4_t _r47 = vld1q_f32(r4 + 28); float32x4_t _k40 = vld1q_f32(k0); float32x4_t _k41 = vld1q_f32(k0 + 4); float32x4_t _k42 = vld1q_f32(k0 + 8); float32x4_t _k43 = vld1q_f32(k0 + 12); float32x4_t _k44 = vld1q_f32(k0 + 16); k0 -= 80; _sum10 = vmlaq_f32(_sum10, _k30, _r40); _sum10 = vmlaq_f32(_sum10, _k31, _r41); _sum10 = vmlaq_f32(_sum10, _k32, _r42); _sum10 = vmlaq_f32(_sum10, _k33, _r43); _sum10 = vmlaq_f32(_sum10, _k34, _r44); _sum11 = vmlaq_f32(_sum11, _k30, _r41); _sum11 = vmlaq_f32(_sum11, _k31, _r42); _sum11 = vmlaq_f32(_sum11, _k32, _r43); _sum11 = vmlaq_f32(_sum11, _k33, _r44); _sum11 = vmlaq_f32(_sum11, _k34, _r45); _sum12 = vmlaq_f32(_sum12, _k30, _r42); _sum12 = vmlaq_f32(_sum12, _k31, _r43); _sum12 = vmlaq_f32(_sum12, _k32, _r44); _sum12 = vmlaq_f32(_sum12, _k33, _r45); _sum12 = vmlaq_f32(_sum12, _k34, _r46); _sum13 = vmlaq_f32(_sum13, _k30, _r43); _sum13 = vmlaq_f32(_sum13, _k31, _r44); _sum13 = vmlaq_f32(_sum13, _k32, _r45); _sum13 = vmlaq_f32(_sum13, _k33, _r46); _sum13 = vmlaq_f32(_sum13, _k34, _r47); _sum00 = vmlaq_f32(_sum00, _k40, _r40); _sum00 = vmlaq_f32(_sum00, _k41, _r41); _sum00 = vmlaq_f32(_sum00, _k42, _r42); _sum00 = vmlaq_f32(_sum00, _k43, _r43); _sum00 = vmlaq_f32(_sum00, _k44, _r44); _sum01 = vmlaq_f32(_sum01, _k40, _r41); _sum01 = vmlaq_f32(_sum01, _k41, _r42); _sum01 = vmlaq_f32(_sum01, _k42, _r43); _sum01 = 
vmlaq_f32(_sum01, _k43, _r44); _sum01 = vmlaq_f32(_sum01, _k44, _r45); _sum02 = vmlaq_f32(_sum02, _k40, _r42); _sum02 = vmlaq_f32(_sum02, _k41, _r43); _sum02 = vmlaq_f32(_sum02, _k42, _r44); _sum02 = vmlaq_f32(_sum02, _k43, _r45); _sum02 = vmlaq_f32(_sum02, _k44, _r46); _sum03 = vmlaq_f32(_sum03, _k40, _r43); _sum03 = vmlaq_f32(_sum03, _k41, _r44); _sum03 = vmlaq_f32(_sum03, _k42, _r45); _sum03 = vmlaq_f32(_sum03, _k43, _r46); _sum03 = vmlaq_f32(_sum03, _k44, _r47); float32x4_t _r50 = vld1q_f32(r5); float32x4_t _r51 = vld1q_f32(r5 + 4); float32x4_t _r52 = vld1q_f32(r5 + 8); float32x4_t _r53 = vld1q_f32(r5 + 12); float32x4_t _r54 = vld1q_f32(r5 + 16); float32x4_t _r55 = vld1q_f32(r5 + 20); float32x4_t _r56 = vld1q_f32(r5 + 24); float32x4_t _r57 = vld1q_f32(r5 + 28); _sum10 = vmlaq_f32(_sum10, _k40, _r50); _sum10 = vmlaq_f32(_sum10, _k41, _r51); _sum10 = vmlaq_f32(_sum10, _k42, _r52); _sum10 = vmlaq_f32(_sum10, _k43, _r53); _sum10 = vmlaq_f32(_sum10, _k44, _r54); _sum11 = vmlaq_f32(_sum11, _k40, _r51); _sum11 = vmlaq_f32(_sum11, _k41, _r52); _sum11 = vmlaq_f32(_sum11, _k42, _r53); _sum11 = vmlaq_f32(_sum11, _k43, _r54); _sum11 = vmlaq_f32(_sum11, _k44, _r55); _sum12 = vmlaq_f32(_sum12, _k40, _r52); _sum12 = vmlaq_f32(_sum12, _k41, _r53); _sum12 = vmlaq_f32(_sum12, _k42, _r54); _sum12 = vmlaq_f32(_sum12, _k43, _r55); _sum12 = vmlaq_f32(_sum12, _k44, _r56); _sum13 = vmlaq_f32(_sum13, _k40, _r53); _sum13 = vmlaq_f32(_sum13, _k41, _r54); _sum13 = vmlaq_f32(_sum13, _k42, _r55); _sum13 = vmlaq_f32(_sum13, _k43, _r56); _sum13 = vmlaq_f32(_sum13, _k44, _r57); vst1q_f32(outptr0, _sum00); vst1q_f32(outptr0 + 4, _sum01); vst1q_f32(outptr0 + 8, _sum02); vst1q_f32(outptr0 + 12, _sum03); vst1q_f32(outptr1, _sum10); vst1q_f32(outptr1 + 4, _sum11); vst1q_f32(outptr1 + 8, _sum12); vst1q_f32(outptr1 + 12, _sum13); r0 += 16; r1 += 16; r2 += 16; r3 += 16; r4 += 16; r5 += 16; outptr0 += 16; outptr1 += 16; } for (; j + 1 < outw; j += 2) { float32x4_t _sum00 = _bias0; float32x4_t _sum01 = 
_bias0; float32x4_t _sum10 = _bias0; float32x4_t _sum11 = _bias0; float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _r05 = vld1q_f32(r0 + 20); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0 + 4); float32x4_t _k02 = vld1q_f32(k0 + 8); float32x4_t _k03 = vld1q_f32(k0 + 12); float32x4_t _k04 = vld1q_f32(k0 + 16); k0 += 20; _sum00 = vmlaq_f32(_sum00, _k00, _r00); _sum00 = vmlaq_f32(_sum00, _k01, _r01); _sum00 = vmlaq_f32(_sum00, _k02, _r02); _sum00 = vmlaq_f32(_sum00, _k03, _r03); _sum00 = vmlaq_f32(_sum00, _k04, _r04); _sum01 = vmlaq_f32(_sum01, _k00, _r01); _sum01 = vmlaq_f32(_sum01, _k01, _r02); _sum01 = vmlaq_f32(_sum01, _k02, _r03); _sum01 = vmlaq_f32(_sum01, _k03, _r04); _sum01 = vmlaq_f32(_sum01, _k04, _r05); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); float32x4_t _r13 = vld1q_f32(r1 + 12); float32x4_t _r14 = vld1q_f32(r1 + 16); float32x4_t _r15 = vld1q_f32(r1 + 20); float32x4_t _k10 = vld1q_f32(k0); float32x4_t _k11 = vld1q_f32(k0 + 4); float32x4_t _k12 = vld1q_f32(k0 + 8); float32x4_t _k13 = vld1q_f32(k0 + 12); float32x4_t _k14 = vld1q_f32(k0 + 16); k0 += 20; _sum10 = vmlaq_f32(_sum10, _k00, _r10); _sum10 = vmlaq_f32(_sum10, _k01, _r11); _sum10 = vmlaq_f32(_sum10, _k02, _r12); _sum10 = vmlaq_f32(_sum10, _k03, _r13); _sum10 = vmlaq_f32(_sum10, _k04, _r14); _sum11 = vmlaq_f32(_sum11, _k00, _r11); _sum11 = vmlaq_f32(_sum11, _k01, _r12); _sum11 = vmlaq_f32(_sum11, _k02, _r13); _sum11 = vmlaq_f32(_sum11, _k03, _r14); _sum11 = vmlaq_f32(_sum11, _k04, _r15); _sum00 = vmlaq_f32(_sum00, _k10, _r10); _sum00 = vmlaq_f32(_sum00, _k11, _r11); _sum00 = vmlaq_f32(_sum00, _k12, _r12); _sum00 = vmlaq_f32(_sum00, _k13, _r13); _sum00 = vmlaq_f32(_sum00, _k14, _r14); _sum01 = vmlaq_f32(_sum01, _k10, _r11); _sum01 = vmlaq_f32(_sum01, _k11, _r12); 
_sum01 = vmlaq_f32(_sum01, _k12, _r13); _sum01 = vmlaq_f32(_sum01, _k13, _r14); _sum01 = vmlaq_f32(_sum01, _k14, _r15); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); float32x4_t _r23 = vld1q_f32(r2 + 12); float32x4_t _r24 = vld1q_f32(r2 + 16); float32x4_t _r25 = vld1q_f32(r2 + 20); float32x4_t _k20 = vld1q_f32(k0); float32x4_t _k21 = vld1q_f32(k0 + 4); float32x4_t _k22 = vld1q_f32(k0 + 8); float32x4_t _k23 = vld1q_f32(k0 + 12); float32x4_t _k24 = vld1q_f32(k0 + 16); k0 += 20; _sum10 = vmlaq_f32(_sum10, _k10, _r20); _sum10 = vmlaq_f32(_sum10, _k11, _r21); _sum10 = vmlaq_f32(_sum10, _k12, _r22); _sum10 = vmlaq_f32(_sum10, _k13, _r23); _sum10 = vmlaq_f32(_sum10, _k14, _r24); _sum11 = vmlaq_f32(_sum11, _k10, _r21); _sum11 = vmlaq_f32(_sum11, _k11, _r22); _sum11 = vmlaq_f32(_sum11, _k12, _r23); _sum11 = vmlaq_f32(_sum11, _k13, _r24); _sum11 = vmlaq_f32(_sum11, _k14, _r25); _sum00 = vmlaq_f32(_sum00, _k20, _r20); _sum00 = vmlaq_f32(_sum00, _k21, _r21); _sum00 = vmlaq_f32(_sum00, _k22, _r22); _sum00 = vmlaq_f32(_sum00, _k23, _r23); _sum00 = vmlaq_f32(_sum00, _k24, _r24); _sum01 = vmlaq_f32(_sum01, _k20, _r21); _sum01 = vmlaq_f32(_sum01, _k21, _r22); _sum01 = vmlaq_f32(_sum01, _k22, _r23); _sum01 = vmlaq_f32(_sum01, _k23, _r24); _sum01 = vmlaq_f32(_sum01, _k24, _r25); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r31 = vld1q_f32(r3 + 4); float32x4_t _r32 = vld1q_f32(r3 + 8); float32x4_t _r33 = vld1q_f32(r3 + 12); float32x4_t _r34 = vld1q_f32(r3 + 16); float32x4_t _r35 = vld1q_f32(r3 + 20); float32x4_t _k30 = vld1q_f32(k0); float32x4_t _k31 = vld1q_f32(k0 + 4); float32x4_t _k32 = vld1q_f32(k0 + 8); float32x4_t _k33 = vld1q_f32(k0 + 12); float32x4_t _k34 = vld1q_f32(k0 + 16); k0 += 20; _sum10 = vmlaq_f32(_sum10, _k20, _r30); _sum10 = vmlaq_f32(_sum10, _k21, _r31); _sum10 = vmlaq_f32(_sum10, _k22, _r32); _sum10 = vmlaq_f32(_sum10, _k23, _r33); _sum10 = vmlaq_f32(_sum10, _k24, _r34); _sum11 = vmlaq_f32(_sum11, 
_k20, _r31); _sum11 = vmlaq_f32(_sum11, _k21, _r32); _sum11 = vmlaq_f32(_sum11, _k22, _r33); _sum11 = vmlaq_f32(_sum11, _k23, _r34); _sum11 = vmlaq_f32(_sum11, _k24, _r35); _sum00 = vmlaq_f32(_sum00, _k30, _r30); _sum00 = vmlaq_f32(_sum00, _k31, _r31); _sum00 = vmlaq_f32(_sum00, _k32, _r32); _sum00 = vmlaq_f32(_sum00, _k33, _r33); _sum00 = vmlaq_f32(_sum00, _k34, _r34); _sum01 = vmlaq_f32(_sum01, _k30, _r31); _sum01 = vmlaq_f32(_sum01, _k31, _r32); _sum01 = vmlaq_f32(_sum01, _k32, _r33); _sum01 = vmlaq_f32(_sum01, _k33, _r34); _sum01 = vmlaq_f32(_sum01, _k34, _r35); float32x4_t _r40 = vld1q_f32(r4); float32x4_t _r41 = vld1q_f32(r4 + 4); float32x4_t _r42 = vld1q_f32(r4 + 8); float32x4_t _r43 = vld1q_f32(r4 + 12); float32x4_t _r44 = vld1q_f32(r4 + 16); float32x4_t _r45 = vld1q_f32(r4 + 20); float32x4_t _k40 = vld1q_f32(k0); float32x4_t _k41 = vld1q_f32(k0 + 4); float32x4_t _k42 = vld1q_f32(k0 + 8); float32x4_t _k43 = vld1q_f32(k0 + 12); float32x4_t _k44 = vld1q_f32(k0 + 16); k0 -= 80; _sum10 = vmlaq_f32(_sum10, _k30, _r40); _sum10 = vmlaq_f32(_sum10, _k31, _r41); _sum10 = vmlaq_f32(_sum10, _k32, _r42); _sum10 = vmlaq_f32(_sum10, _k33, _r43); _sum10 = vmlaq_f32(_sum10, _k34, _r44); _sum11 = vmlaq_f32(_sum11, _k30, _r41); _sum11 = vmlaq_f32(_sum11, _k31, _r42); _sum11 = vmlaq_f32(_sum11, _k32, _r43); _sum11 = vmlaq_f32(_sum11, _k33, _r44); _sum11 = vmlaq_f32(_sum11, _k34, _r45); _sum00 = vmlaq_f32(_sum00, _k40, _r40); _sum00 = vmlaq_f32(_sum00, _k41, _r41); _sum00 = vmlaq_f32(_sum00, _k42, _r42); _sum00 = vmlaq_f32(_sum00, _k43, _r43); _sum00 = vmlaq_f32(_sum00, _k44, _r44); _sum01 = vmlaq_f32(_sum01, _k40, _r41); _sum01 = vmlaq_f32(_sum01, _k41, _r42); _sum01 = vmlaq_f32(_sum01, _k42, _r43); _sum01 = vmlaq_f32(_sum01, _k43, _r44); _sum01 = vmlaq_f32(_sum01, _k44, _r45); float32x4_t _r50 = vld1q_f32(r5); float32x4_t _r51 = vld1q_f32(r5 + 4); float32x4_t _r52 = vld1q_f32(r5 + 8); float32x4_t _r53 = vld1q_f32(r5 + 12); float32x4_t _r54 = vld1q_f32(r5 + 16); float32x4_t 
_r55 = vld1q_f32(r5 + 20); _sum10 = vmlaq_f32(_sum10, _k40, _r50); _sum10 = vmlaq_f32(_sum10, _k41, _r51); _sum10 = vmlaq_f32(_sum10, _k42, _r52); _sum10 = vmlaq_f32(_sum10, _k43, _r53); _sum10 = vmlaq_f32(_sum10, _k44, _r54); _sum11 = vmlaq_f32(_sum11, _k40, _r51); _sum11 = vmlaq_f32(_sum11, _k41, _r52); _sum11 = vmlaq_f32(_sum11, _k42, _r53); _sum11 = vmlaq_f32(_sum11, _k43, _r54); _sum11 = vmlaq_f32(_sum11, _k44, _r55); vst1q_f32(outptr0, _sum00); vst1q_f32(outptr0 + 4, _sum01); vst1q_f32(outptr1, _sum10); vst1q_f32(outptr1 + 4, _sum11); r0 += 8; r1 += 8; r2 += 8; r3 += 8; r4 += 8; r5 += 8; outptr0 += 8; outptr1 += 8; } for (; j < outw; j++) { float32x4_t _sum0 = _bias0; float32x4_t _sum1 = _bias0; float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0 + 4); float32x4_t _k02 = vld1q_f32(k0 + 8); float32x4_t _k03 = vld1q_f32(k0 + 12); float32x4_t _k04 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k00, _r00); _sum0 = vmlaq_f32(_sum0, _k01, _r01); _sum0 = vmlaq_f32(_sum0, _k02, _r02); _sum0 = vmlaq_f32(_sum0, _k03, _r03); _sum0 = vmlaq_f32(_sum0, _k04, _r04); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); float32x4_t _r13 = vld1q_f32(r1 + 12); float32x4_t _r14 = vld1q_f32(r1 + 16); float32x4_t _k10 = vld1q_f32(k0); float32x4_t _k11 = vld1q_f32(k0 + 4); float32x4_t _k12 = vld1q_f32(k0 + 8); float32x4_t _k13 = vld1q_f32(k0 + 12); float32x4_t _k14 = vld1q_f32(k0 + 16); k0 += 20; _sum1 = vmlaq_f32(_sum1, _k00, _r10); _sum1 = vmlaq_f32(_sum1, _k01, _r11); _sum1 = vmlaq_f32(_sum1, _k02, _r12); _sum1 = vmlaq_f32(_sum1, _k03, _r13); _sum1 = vmlaq_f32(_sum1, _k04, _r14); _sum0 = vmlaq_f32(_sum0, _k10, _r10); _sum0 = vmlaq_f32(_sum0, _k11, _r11); _sum0 = vmlaq_f32(_sum0, _k12, _r12); _sum0 = 
vmlaq_f32(_sum0, _k13, _r13); _sum0 = vmlaq_f32(_sum0, _k14, _r14); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); float32x4_t _r23 = vld1q_f32(r2 + 12); float32x4_t _r24 = vld1q_f32(r2 + 16); float32x4_t _k20 = vld1q_f32(k0); float32x4_t _k21 = vld1q_f32(k0 + 4); float32x4_t _k22 = vld1q_f32(k0 + 8); float32x4_t _k23 = vld1q_f32(k0 + 12); float32x4_t _k24 = vld1q_f32(k0 + 16); k0 += 20; _sum1 = vmlaq_f32(_sum1, _k10, _r20); _sum1 = vmlaq_f32(_sum1, _k11, _r21); _sum1 = vmlaq_f32(_sum1, _k12, _r22); _sum1 = vmlaq_f32(_sum1, _k13, _r23); _sum1 = vmlaq_f32(_sum1, _k14, _r24); _sum0 = vmlaq_f32(_sum0, _k20, _r20); _sum0 = vmlaq_f32(_sum0, _k21, _r21); _sum0 = vmlaq_f32(_sum0, _k22, _r22); _sum0 = vmlaq_f32(_sum0, _k23, _r23); _sum0 = vmlaq_f32(_sum0, _k24, _r24); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r31 = vld1q_f32(r3 + 4); float32x4_t _r32 = vld1q_f32(r3 + 8); float32x4_t _r33 = vld1q_f32(r3 + 12); float32x4_t _r34 = vld1q_f32(r3 + 16); float32x4_t _k30 = vld1q_f32(k0); float32x4_t _k31 = vld1q_f32(k0 + 4); float32x4_t _k32 = vld1q_f32(k0 + 8); float32x4_t _k33 = vld1q_f32(k0 + 12); float32x4_t _k34 = vld1q_f32(k0 + 16); k0 += 20; _sum1 = vmlaq_f32(_sum1, _k20, _r30); _sum1 = vmlaq_f32(_sum1, _k21, _r31); _sum1 = vmlaq_f32(_sum1, _k22, _r32); _sum1 = vmlaq_f32(_sum1, _k23, _r33); _sum1 = vmlaq_f32(_sum1, _k24, _r34); _sum0 = vmlaq_f32(_sum0, _k30, _r30); _sum0 = vmlaq_f32(_sum0, _k31, _r31); _sum0 = vmlaq_f32(_sum0, _k32, _r32); _sum0 = vmlaq_f32(_sum0, _k33, _r33); _sum0 = vmlaq_f32(_sum0, _k34, _r34); float32x4_t _r40 = vld1q_f32(r4); float32x4_t _r41 = vld1q_f32(r4 + 4); float32x4_t _r42 = vld1q_f32(r4 + 8); float32x4_t _r43 = vld1q_f32(r4 + 12); float32x4_t _r44 = vld1q_f32(r4 + 16); float32x4_t _k40 = vld1q_f32(k0); float32x4_t _k41 = vld1q_f32(k0 + 4); float32x4_t _k42 = vld1q_f32(k0 + 8); float32x4_t _k43 = vld1q_f32(k0 + 12); float32x4_t _k44 = vld1q_f32(k0 + 16); k0 -= 80; _sum1 = 
vmlaq_f32(_sum1, _k30, _r40); _sum1 = vmlaq_f32(_sum1, _k31, _r41); _sum1 = vmlaq_f32(_sum1, _k32, _r42); _sum1 = vmlaq_f32(_sum1, _k33, _r43); _sum1 = vmlaq_f32(_sum1, _k34, _r44); _sum0 = vmlaq_f32(_sum0, _k40, _r40); _sum0 = vmlaq_f32(_sum0, _k41, _r41); _sum0 = vmlaq_f32(_sum0, _k42, _r42); _sum0 = vmlaq_f32(_sum0, _k43, _r43); _sum0 = vmlaq_f32(_sum0, _k44, _r44); float32x4_t _r50 = vld1q_f32(r5); float32x4_t _r51 = vld1q_f32(r5 + 4); float32x4_t _r52 = vld1q_f32(r5 + 8); float32x4_t _r53 = vld1q_f32(r5 + 12); float32x4_t _r54 = vld1q_f32(r5 + 16); _sum1 = vmlaq_f32(_sum1, _k40, _r50); _sum1 = vmlaq_f32(_sum1, _k41, _r51); _sum1 = vmlaq_f32(_sum1, _k42, _r52); _sum1 = vmlaq_f32(_sum1, _k43, _r53); _sum1 = vmlaq_f32(_sum1, _k44, _r54); vst1q_f32(outptr0, _sum0); vst1q_f32(outptr1, _sum1); r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; r5 += 4; outptr0 += 4; outptr1 += 4; } r0 += 4 * 4 + w * 4; r1 += 4 * 4 + w * 4; r2 += 4 * 4 + w * 4; r3 += 4 * 4 + w * 4; r4 += 4 * 4 + w * 4; r5 += 4 * 4 + w * 4; outptr0 += outw * 4; outptr1 += outw * 4; } #endif // __aarch64__ for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { float32x4_t _sum0 = _bias0; float32x4_t _sum1 = _bias0; float32x4_t _sum2 = _bias0; float32x4_t _sum3 = _bias0; float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _r05 = vld1q_f32(r0 + 20); float32x4_t _r06 = vld1q_f32(r0 + 24); float32x4_t _r07 = vld1q_f32(r0 + 28); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0 + 4); float32x4_t _k02 = vld1q_f32(k0 + 8); float32x4_t _k03 = vld1q_f32(k0 + 12); float32x4_t _k04 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k00, _r00); _sum0 = vmlaq_f32(_sum0, _k01, _r01); _sum0 = vmlaq_f32(_sum0, _k02, _r02); _sum0 = vmlaq_f32(_sum0, _k03, _r03); _sum0 = vmlaq_f32(_sum0, _k04, _r04); _sum1 = vmlaq_f32(_sum1, _k00, _r01); _sum1 = 
vmlaq_f32(_sum1, _k01, _r02); _sum1 = vmlaq_f32(_sum1, _k02, _r03); _sum1 = vmlaq_f32(_sum1, _k03, _r04); _sum1 = vmlaq_f32(_sum1, _k04, _r05); _sum2 = vmlaq_f32(_sum2, _k00, _r02); _sum2 = vmlaq_f32(_sum2, _k01, _r03); _sum2 = vmlaq_f32(_sum2, _k02, _r04); _sum2 = vmlaq_f32(_sum2, _k03, _r05); _sum2 = vmlaq_f32(_sum2, _k04, _r06); _sum3 = vmlaq_f32(_sum3, _k00, _r03); _sum3 = vmlaq_f32(_sum3, _k01, _r04); _sum3 = vmlaq_f32(_sum3, _k02, _r05); _sum3 = vmlaq_f32(_sum3, _k03, _r06); _sum3 = vmlaq_f32(_sum3, _k04, _r07); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); float32x4_t _r13 = vld1q_f32(r1 + 12); float32x4_t _r14 = vld1q_f32(r1 + 16); float32x4_t _r15 = vld1q_f32(r1 + 20); float32x4_t _r16 = vld1q_f32(r1 + 24); float32x4_t _r17 = vld1q_f32(r1 + 28); float32x4_t _k10 = vld1q_f32(k0); float32x4_t _k11 = vld1q_f32(k0 + 4); float32x4_t _k12 = vld1q_f32(k0 + 8); float32x4_t _k13 = vld1q_f32(k0 + 12); float32x4_t _k14 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k10, _r10); _sum0 = vmlaq_f32(_sum0, _k11, _r11); _sum0 = vmlaq_f32(_sum0, _k12, _r12); _sum0 = vmlaq_f32(_sum0, _k13, _r13); _sum0 = vmlaq_f32(_sum0, _k14, _r14); _sum1 = vmlaq_f32(_sum1, _k10, _r11); _sum1 = vmlaq_f32(_sum1, _k11, _r12); _sum1 = vmlaq_f32(_sum1, _k12, _r13); _sum1 = vmlaq_f32(_sum1, _k13, _r14); _sum1 = vmlaq_f32(_sum1, _k14, _r15); _sum2 = vmlaq_f32(_sum2, _k10, _r12); _sum2 = vmlaq_f32(_sum2, _k11, _r13); _sum2 = vmlaq_f32(_sum2, _k12, _r14); _sum2 = vmlaq_f32(_sum2, _k13, _r15); _sum2 = vmlaq_f32(_sum2, _k14, _r16); _sum3 = vmlaq_f32(_sum3, _k10, _r13); _sum3 = vmlaq_f32(_sum3, _k11, _r14); _sum3 = vmlaq_f32(_sum3, _k12, _r15); _sum3 = vmlaq_f32(_sum3, _k13, _r16); _sum3 = vmlaq_f32(_sum3, _k14, _r17); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); float32x4_t _r23 = vld1q_f32(r2 + 12); float32x4_t _r24 = vld1q_f32(r2 + 16); float32x4_t _r25 = 
vld1q_f32(r2 + 20); float32x4_t _r26 = vld1q_f32(r2 + 24); float32x4_t _r27 = vld1q_f32(r2 + 28); float32x4_t _k20 = vld1q_f32(k0); float32x4_t _k21 = vld1q_f32(k0 + 4); float32x4_t _k22 = vld1q_f32(k0 + 8); float32x4_t _k23 = vld1q_f32(k0 + 12); float32x4_t _k24 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k20, _r20); _sum0 = vmlaq_f32(_sum0, _k21, _r21); _sum0 = vmlaq_f32(_sum0, _k22, _r22); _sum0 = vmlaq_f32(_sum0, _k23, _r23); _sum0 = vmlaq_f32(_sum0, _k24, _r24); _sum1 = vmlaq_f32(_sum1, _k20, _r21); _sum1 = vmlaq_f32(_sum1, _k21, _r22); _sum1 = vmlaq_f32(_sum1, _k22, _r23); _sum1 = vmlaq_f32(_sum1, _k23, _r24); _sum1 = vmlaq_f32(_sum1, _k24, _r25); _sum2 = vmlaq_f32(_sum2, _k20, _r22); _sum2 = vmlaq_f32(_sum2, _k21, _r23); _sum2 = vmlaq_f32(_sum2, _k22, _r24); _sum2 = vmlaq_f32(_sum2, _k23, _r25); _sum2 = vmlaq_f32(_sum2, _k24, _r26); _sum3 = vmlaq_f32(_sum3, _k20, _r23); _sum3 = vmlaq_f32(_sum3, _k21, _r24); _sum3 = vmlaq_f32(_sum3, _k22, _r25); _sum3 = vmlaq_f32(_sum3, _k23, _r26); _sum3 = vmlaq_f32(_sum3, _k24, _r27); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r31 = vld1q_f32(r3 + 4); float32x4_t _r32 = vld1q_f32(r3 + 8); float32x4_t _r33 = vld1q_f32(r3 + 12); float32x4_t _r34 = vld1q_f32(r3 + 16); float32x4_t _r35 = vld1q_f32(r3 + 20); float32x4_t _r36 = vld1q_f32(r3 + 24); float32x4_t _r37 = vld1q_f32(r3 + 28); float32x4_t _k30 = vld1q_f32(k0); float32x4_t _k31 = vld1q_f32(k0 + 4); float32x4_t _k32 = vld1q_f32(k0 + 8); float32x4_t _k33 = vld1q_f32(k0 + 12); float32x4_t _k34 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k30, _r30); _sum0 = vmlaq_f32(_sum0, _k31, _r31); _sum0 = vmlaq_f32(_sum0, _k32, _r32); _sum0 = vmlaq_f32(_sum0, _k33, _r33); _sum0 = vmlaq_f32(_sum0, _k34, _r34); _sum1 = vmlaq_f32(_sum1, _k30, _r31); _sum1 = vmlaq_f32(_sum1, _k31, _r32); _sum1 = vmlaq_f32(_sum1, _k32, _r33); _sum1 = vmlaq_f32(_sum1, _k33, _r34); _sum1 = vmlaq_f32(_sum1, _k34, _r35); _sum2 = vmlaq_f32(_sum2, _k30, _r32); _sum2 = 
vmlaq_f32(_sum2, _k31, _r33); _sum2 = vmlaq_f32(_sum2, _k32, _r34); _sum2 = vmlaq_f32(_sum2, _k33, _r35); _sum2 = vmlaq_f32(_sum2, _k34, _r36); _sum3 = vmlaq_f32(_sum3, _k30, _r33); _sum3 = vmlaq_f32(_sum3, _k31, _r34); _sum3 = vmlaq_f32(_sum3, _k32, _r35); _sum3 = vmlaq_f32(_sum3, _k33, _r36); _sum3 = vmlaq_f32(_sum3, _k34, _r37); float32x4_t _r40 = vld1q_f32(r4); float32x4_t _r41 = vld1q_f32(r4 + 4); float32x4_t _r42 = vld1q_f32(r4 + 8); float32x4_t _r43 = vld1q_f32(r4 + 12); float32x4_t _r44 = vld1q_f32(r4 + 16); float32x4_t _r45 = vld1q_f32(r4 + 20); float32x4_t _r46 = vld1q_f32(r4 + 24); float32x4_t _r47 = vld1q_f32(r4 + 28); float32x4_t _k40 = vld1q_f32(k0); float32x4_t _k41 = vld1q_f32(k0 + 4); float32x4_t _k42 = vld1q_f32(k0 + 8); float32x4_t _k43 = vld1q_f32(k0 + 12); float32x4_t _k44 = vld1q_f32(k0 + 16); k0 -= 80; _sum0 = vmlaq_f32(_sum0, _k40, _r40); _sum0 = vmlaq_f32(_sum0, _k41, _r41); _sum0 = vmlaq_f32(_sum0, _k42, _r42); _sum0 = vmlaq_f32(_sum0, _k43, _r43); _sum0 = vmlaq_f32(_sum0, _k44, _r44); _sum1 = vmlaq_f32(_sum1, _k40, _r41); _sum1 = vmlaq_f32(_sum1, _k41, _r42); _sum1 = vmlaq_f32(_sum1, _k42, _r43); _sum1 = vmlaq_f32(_sum1, _k43, _r44); _sum1 = vmlaq_f32(_sum1, _k44, _r45); _sum2 = vmlaq_f32(_sum2, _k40, _r42); _sum2 = vmlaq_f32(_sum2, _k41, _r43); _sum2 = vmlaq_f32(_sum2, _k42, _r44); _sum2 = vmlaq_f32(_sum2, _k43, _r45); _sum2 = vmlaq_f32(_sum2, _k44, _r46); _sum3 = vmlaq_f32(_sum3, _k40, _r43); _sum3 = vmlaq_f32(_sum3, _k41, _r44); _sum3 = vmlaq_f32(_sum3, _k42, _r45); _sum3 = vmlaq_f32(_sum3, _k43, _r46); _sum3 = vmlaq_f32(_sum3, _k44, _r47); vst1q_f32(outptr0, _sum0); vst1q_f32(outptr0 + 4, _sum1); vst1q_f32(outptr0 + 8, _sum2); vst1q_f32(outptr0 + 12, _sum3); r0 += 16; r1 += 16; r2 += 16; r3 += 16; r4 += 16; outptr0 += 16; } for (; j + 1 < outw; j += 2) { float32x4_t _sum0 = _bias0; float32x4_t _sum1 = _bias0; float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 
= vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _r05 = vld1q_f32(r0 + 20); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0 + 4); float32x4_t _k02 = vld1q_f32(k0 + 8); float32x4_t _k03 = vld1q_f32(k0 + 12); float32x4_t _k04 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k00, _r00); _sum0 = vmlaq_f32(_sum0, _k01, _r01); _sum0 = vmlaq_f32(_sum0, _k02, _r02); _sum0 = vmlaq_f32(_sum0, _k03, _r03); _sum0 = vmlaq_f32(_sum0, _k04, _r04); _sum1 = vmlaq_f32(_sum1, _k00, _r01); _sum1 = vmlaq_f32(_sum1, _k01, _r02); _sum1 = vmlaq_f32(_sum1, _k02, _r03); _sum1 = vmlaq_f32(_sum1, _k03, _r04); _sum1 = vmlaq_f32(_sum1, _k04, _r05); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); float32x4_t _r13 = vld1q_f32(r1 + 12); float32x4_t _r14 = vld1q_f32(r1 + 16); float32x4_t _r15 = vld1q_f32(r1 + 20); float32x4_t _k10 = vld1q_f32(k0); float32x4_t _k11 = vld1q_f32(k0 + 4); float32x4_t _k12 = vld1q_f32(k0 + 8); float32x4_t _k13 = vld1q_f32(k0 + 12); float32x4_t _k14 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k10, _r10); _sum0 = vmlaq_f32(_sum0, _k11, _r11); _sum0 = vmlaq_f32(_sum0, _k12, _r12); _sum0 = vmlaq_f32(_sum0, _k13, _r13); _sum0 = vmlaq_f32(_sum0, _k14, _r14); _sum1 = vmlaq_f32(_sum1, _k10, _r11); _sum1 = vmlaq_f32(_sum1, _k11, _r12); _sum1 = vmlaq_f32(_sum1, _k12, _r13); _sum1 = vmlaq_f32(_sum1, _k13, _r14); _sum1 = vmlaq_f32(_sum1, _k14, _r15); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); float32x4_t _r23 = vld1q_f32(r2 + 12); float32x4_t _r24 = vld1q_f32(r2 + 16); float32x4_t _r25 = vld1q_f32(r2 + 20); float32x4_t _k20 = vld1q_f32(k0); float32x4_t _k21 = vld1q_f32(k0 + 4); float32x4_t _k22 = vld1q_f32(k0 + 8); float32x4_t _k23 = vld1q_f32(k0 + 12); float32x4_t _k24 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k20, _r20); _sum0 = vmlaq_f32(_sum0, _k21, _r21); _sum0 = 
vmlaq_f32(_sum0, _k22, _r22); _sum0 = vmlaq_f32(_sum0, _k23, _r23); _sum0 = vmlaq_f32(_sum0, _k24, _r24); _sum1 = vmlaq_f32(_sum1, _k20, _r21); _sum1 = vmlaq_f32(_sum1, _k21, _r22); _sum1 = vmlaq_f32(_sum1, _k22, _r23); _sum1 = vmlaq_f32(_sum1, _k23, _r24); _sum1 = vmlaq_f32(_sum1, _k24, _r25); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r31 = vld1q_f32(r3 + 4); float32x4_t _r32 = vld1q_f32(r3 + 8); float32x4_t _r33 = vld1q_f32(r3 + 12); float32x4_t _r34 = vld1q_f32(r3 + 16); float32x4_t _r35 = vld1q_f32(r3 + 20); float32x4_t _k30 = vld1q_f32(k0); float32x4_t _k31 = vld1q_f32(k0 + 4); float32x4_t _k32 = vld1q_f32(k0 + 8); float32x4_t _k33 = vld1q_f32(k0 + 12); float32x4_t _k34 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k30, _r30); _sum0 = vmlaq_f32(_sum0, _k31, _r31); _sum0 = vmlaq_f32(_sum0, _k32, _r32); _sum0 = vmlaq_f32(_sum0, _k33, _r33); _sum0 = vmlaq_f32(_sum0, _k34, _r34); _sum1 = vmlaq_f32(_sum1, _k30, _r31); _sum1 = vmlaq_f32(_sum1, _k31, _r32); _sum1 = vmlaq_f32(_sum1, _k32, _r33); _sum1 = vmlaq_f32(_sum1, _k33, _r34); _sum1 = vmlaq_f32(_sum1, _k34, _r35); float32x4_t _r40 = vld1q_f32(r4); float32x4_t _r41 = vld1q_f32(r4 + 4); float32x4_t _r42 = vld1q_f32(r4 + 8); float32x4_t _r43 = vld1q_f32(r4 + 12); float32x4_t _r44 = vld1q_f32(r4 + 16); float32x4_t _r45 = vld1q_f32(r4 + 20); float32x4_t _k40 = vld1q_f32(k0); float32x4_t _k41 = vld1q_f32(k0 + 4); float32x4_t _k42 = vld1q_f32(k0 + 8); float32x4_t _k43 = vld1q_f32(k0 + 12); float32x4_t _k44 = vld1q_f32(k0 + 16); k0 -= 80; _sum0 = vmlaq_f32(_sum0, _k40, _r40); _sum0 = vmlaq_f32(_sum0, _k41, _r41); _sum0 = vmlaq_f32(_sum0, _k42, _r42); _sum0 = vmlaq_f32(_sum0, _k43, _r43); _sum0 = vmlaq_f32(_sum0, _k44, _r44); _sum1 = vmlaq_f32(_sum1, _k40, _r41); _sum1 = vmlaq_f32(_sum1, _k41, _r42); _sum1 = vmlaq_f32(_sum1, _k42, _r43); _sum1 = vmlaq_f32(_sum1, _k43, _r44); _sum1 = vmlaq_f32(_sum1, _k44, _r45); vst1q_f32(outptr0, _sum0); vst1q_f32(outptr0 + 4, _sum1); r0 += 8; r1 += 8; r2 += 8; r3 += 8; 
r4 += 8; outptr0 += 8; } for (; j < outw; j++) { float32x4_t _sum0 = _bias0; float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0 + 4); float32x4_t _k02 = vld1q_f32(k0 + 8); float32x4_t _k03 = vld1q_f32(k0 + 12); float32x4_t _k04 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k00, _r00); _sum0 = vmlaq_f32(_sum0, _k01, _r01); _sum0 = vmlaq_f32(_sum0, _k02, _r02); _sum0 = vmlaq_f32(_sum0, _k03, _r03); _sum0 = vmlaq_f32(_sum0, _k04, _r04); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); float32x4_t _r13 = vld1q_f32(r1 + 12); float32x4_t _r14 = vld1q_f32(r1 + 16); float32x4_t _k10 = vld1q_f32(k0); float32x4_t _k11 = vld1q_f32(k0 + 4); float32x4_t _k12 = vld1q_f32(k0 + 8); float32x4_t _k13 = vld1q_f32(k0 + 12); float32x4_t _k14 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k10, _r10); _sum0 = vmlaq_f32(_sum0, _k11, _r11); _sum0 = vmlaq_f32(_sum0, _k12, _r12); _sum0 = vmlaq_f32(_sum0, _k13, _r13); _sum0 = vmlaq_f32(_sum0, _k14, _r14); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); float32x4_t _r23 = vld1q_f32(r2 + 12); float32x4_t _r24 = vld1q_f32(r2 + 16); float32x4_t _k20 = vld1q_f32(k0); float32x4_t _k21 = vld1q_f32(k0 + 4); float32x4_t _k22 = vld1q_f32(k0 + 8); float32x4_t _k23 = vld1q_f32(k0 + 12); float32x4_t _k24 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k20, _r20); _sum0 = vmlaq_f32(_sum0, _k21, _r21); _sum0 = vmlaq_f32(_sum0, _k22, _r22); _sum0 = vmlaq_f32(_sum0, _k23, _r23); _sum0 = vmlaq_f32(_sum0, _k24, _r24); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r31 = vld1q_f32(r3 + 4); float32x4_t _r32 = vld1q_f32(r3 + 8); float32x4_t _r33 = vld1q_f32(r3 + 12); float32x4_t _r34 = vld1q_f32(r3 + 16); 
float32x4_t _k30 = vld1q_f32(k0); float32x4_t _k31 = vld1q_f32(k0 + 4); float32x4_t _k32 = vld1q_f32(k0 + 8); float32x4_t _k33 = vld1q_f32(k0 + 12); float32x4_t _k34 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k30, _r30); _sum0 = vmlaq_f32(_sum0, _k31, _r31); _sum0 = vmlaq_f32(_sum0, _k32, _r32); _sum0 = vmlaq_f32(_sum0, _k33, _r33); _sum0 = vmlaq_f32(_sum0, _k34, _r34); float32x4_t _r40 = vld1q_f32(r4); float32x4_t _r41 = vld1q_f32(r4 + 4); float32x4_t _r42 = vld1q_f32(r4 + 8); float32x4_t _r43 = vld1q_f32(r4 + 12); float32x4_t _r44 = vld1q_f32(r4 + 16); float32x4_t _k40 = vld1q_f32(k0); float32x4_t _k41 = vld1q_f32(k0 + 4); float32x4_t _k42 = vld1q_f32(k0 + 8); float32x4_t _k43 = vld1q_f32(k0 + 12); float32x4_t _k44 = vld1q_f32(k0 + 16); k0 -= 80; _sum0 = vmlaq_f32(_sum0, _k40, _r40); _sum0 = vmlaq_f32(_sum0, _k41, _r41); _sum0 = vmlaq_f32(_sum0, _k42, _r42); _sum0 = vmlaq_f32(_sum0, _k43, _r43); _sum0 = vmlaq_f32(_sum0, _k44, _r44); vst1q_f32(outptr0, _sum0); r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; outptr0 += 4; } r0 += 4 * 4; r1 += 4 * 4; r2 += 4 * 4; r3 += 4 * 4; r4 += 4 * 4; } } } static void convdw5x5s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const int tailstep = (w - 2 * outw + w) * 4; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); float32x4_t _bias0 = bias ? 
vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f); const float* k0 = kernel.row(g); float* outptr0 = out; const Mat img0 = bottom_blob.channel(g); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); const float* r3 = img0.row(3); const float* r4 = img0.row(4); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { float32x4_t _sum0 = _bias0; float32x4_t _sum1 = _bias0; float32x4_t _sum2 = _bias0; float32x4_t _sum3 = _bias0; float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _r05 = vld1q_f32(r0 + 20); float32x4_t _r06 = vld1q_f32(r0 + 24); float32x4_t _r07 = vld1q_f32(r0 + 28); float32x4_t _r08 = vld1q_f32(r0 + 32); float32x4_t _r09 = vld1q_f32(r0 + 36); float32x4_t _r010 = vld1q_f32(r0 + 40); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0 + 4); float32x4_t _k02 = vld1q_f32(k0 + 8); float32x4_t _k03 = vld1q_f32(k0 + 12); float32x4_t _k04 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k00, _r00); _sum0 = vmlaq_f32(_sum0, _k01, _r01); _sum0 = vmlaq_f32(_sum0, _k02, _r02); _sum0 = vmlaq_f32(_sum0, _k03, _r03); _sum0 = vmlaq_f32(_sum0, _k04, _r04); _sum1 = vmlaq_f32(_sum1, _k00, _r02); _sum1 = vmlaq_f32(_sum1, _k01, _r03); _sum1 = vmlaq_f32(_sum1, _k02, _r04); _sum1 = vmlaq_f32(_sum1, _k03, _r05); _sum1 = vmlaq_f32(_sum1, _k04, _r06); _sum2 = vmlaq_f32(_sum2, _k00, _r04); _sum2 = vmlaq_f32(_sum2, _k01, _r05); _sum2 = vmlaq_f32(_sum2, _k02, _r06); _sum2 = vmlaq_f32(_sum2, _k03, _r07); _sum2 = vmlaq_f32(_sum2, _k04, _r08); _sum3 = vmlaq_f32(_sum3, _k00, _r06); _sum3 = vmlaq_f32(_sum3, _k01, _r07); _sum3 = vmlaq_f32(_sum3, _k02, _r08); _sum3 = vmlaq_f32(_sum3, _k03, _r09); _sum3 = vmlaq_f32(_sum3, _k04, _r010); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); 
float32x4_t _r13 = vld1q_f32(r1 + 12); float32x4_t _r14 = vld1q_f32(r1 + 16); float32x4_t _r15 = vld1q_f32(r1 + 20); float32x4_t _r16 = vld1q_f32(r1 + 24); float32x4_t _r17 = vld1q_f32(r1 + 28); float32x4_t _r18 = vld1q_f32(r1 + 32); float32x4_t _r19 = vld1q_f32(r1 + 36); float32x4_t _r110 = vld1q_f32(r1 + 40); float32x4_t _k10 = vld1q_f32(k0); float32x4_t _k11 = vld1q_f32(k0 + 4); float32x4_t _k12 = vld1q_f32(k0 + 8); float32x4_t _k13 = vld1q_f32(k0 + 12); float32x4_t _k14 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k10, _r10); _sum0 = vmlaq_f32(_sum0, _k11, _r11); _sum0 = vmlaq_f32(_sum0, _k12, _r12); _sum0 = vmlaq_f32(_sum0, _k13, _r13); _sum0 = vmlaq_f32(_sum0, _k14, _r14); _sum1 = vmlaq_f32(_sum1, _k10, _r12); _sum1 = vmlaq_f32(_sum1, _k11, _r13); _sum1 = vmlaq_f32(_sum1, _k12, _r14); _sum1 = vmlaq_f32(_sum1, _k13, _r15); _sum1 = vmlaq_f32(_sum1, _k14, _r16); _sum2 = vmlaq_f32(_sum2, _k10, _r14); _sum2 = vmlaq_f32(_sum2, _k11, _r15); _sum2 = vmlaq_f32(_sum2, _k12, _r16); _sum2 = vmlaq_f32(_sum2, _k13, _r17); _sum2 = vmlaq_f32(_sum2, _k14, _r18); _sum3 = vmlaq_f32(_sum3, _k10, _r16); _sum3 = vmlaq_f32(_sum3, _k11, _r17); _sum3 = vmlaq_f32(_sum3, _k12, _r18); _sum3 = vmlaq_f32(_sum3, _k13, _r19); _sum3 = vmlaq_f32(_sum3, _k14, _r110); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); float32x4_t _r23 = vld1q_f32(r2 + 12); float32x4_t _r24 = vld1q_f32(r2 + 16); float32x4_t _r25 = vld1q_f32(r2 + 20); float32x4_t _r26 = vld1q_f32(r2 + 24); float32x4_t _r27 = vld1q_f32(r2 + 28); float32x4_t _r28 = vld1q_f32(r2 + 32); float32x4_t _r29 = vld1q_f32(r2 + 36); float32x4_t _r210 = vld1q_f32(r2 + 40); float32x4_t _k20 = vld1q_f32(k0); float32x4_t _k21 = vld1q_f32(k0 + 4); float32x4_t _k22 = vld1q_f32(k0 + 8); float32x4_t _k23 = vld1q_f32(k0 + 12); float32x4_t _k24 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k20, _r20); _sum0 = vmlaq_f32(_sum0, _k21, _r21); _sum0 = vmlaq_f32(_sum0, _k22, 
_r22); _sum0 = vmlaq_f32(_sum0, _k23, _r23); _sum0 = vmlaq_f32(_sum0, _k24, _r24); _sum1 = vmlaq_f32(_sum1, _k20, _r22); _sum1 = vmlaq_f32(_sum1, _k21, _r23); _sum1 = vmlaq_f32(_sum1, _k22, _r24); _sum1 = vmlaq_f32(_sum1, _k23, _r25); _sum1 = vmlaq_f32(_sum1, _k24, _r26); _sum2 = vmlaq_f32(_sum2, _k20, _r24); _sum2 = vmlaq_f32(_sum2, _k21, _r25); _sum2 = vmlaq_f32(_sum2, _k22, _r26); _sum2 = vmlaq_f32(_sum2, _k23, _r27); _sum2 = vmlaq_f32(_sum2, _k24, _r28); _sum3 = vmlaq_f32(_sum3, _k20, _r26); _sum3 = vmlaq_f32(_sum3, _k21, _r27); _sum3 = vmlaq_f32(_sum3, _k22, _r28); _sum3 = vmlaq_f32(_sum3, _k23, _r29); _sum3 = vmlaq_f32(_sum3, _k24, _r210); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r31 = vld1q_f32(r3 + 4); float32x4_t _r32 = vld1q_f32(r3 + 8); float32x4_t _r33 = vld1q_f32(r3 + 12); float32x4_t _r34 = vld1q_f32(r3 + 16); float32x4_t _r35 = vld1q_f32(r3 + 20); float32x4_t _r36 = vld1q_f32(r3 + 24); float32x4_t _r37 = vld1q_f32(r3 + 28); float32x4_t _r38 = vld1q_f32(r3 + 32); float32x4_t _r39 = vld1q_f32(r3 + 36); float32x4_t _r310 = vld1q_f32(r3 + 40); float32x4_t _k30 = vld1q_f32(k0); float32x4_t _k31 = vld1q_f32(k0 + 4); float32x4_t _k32 = vld1q_f32(k0 + 8); float32x4_t _k33 = vld1q_f32(k0 + 12); float32x4_t _k34 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k30, _r30); _sum0 = vmlaq_f32(_sum0, _k31, _r31); _sum0 = vmlaq_f32(_sum0, _k32, _r32); _sum0 = vmlaq_f32(_sum0, _k33, _r33); _sum0 = vmlaq_f32(_sum0, _k34, _r34); _sum1 = vmlaq_f32(_sum1, _k30, _r32); _sum1 = vmlaq_f32(_sum1, _k31, _r33); _sum1 = vmlaq_f32(_sum1, _k32, _r34); _sum1 = vmlaq_f32(_sum1, _k33, _r35); _sum1 = vmlaq_f32(_sum1, _k34, _r36); _sum2 = vmlaq_f32(_sum2, _k30, _r34); _sum2 = vmlaq_f32(_sum2, _k31, _r35); _sum2 = vmlaq_f32(_sum2, _k32, _r36); _sum2 = vmlaq_f32(_sum2, _k33, _r37); _sum2 = vmlaq_f32(_sum2, _k34, _r38); _sum3 = vmlaq_f32(_sum3, _k30, _r36); _sum3 = vmlaq_f32(_sum3, _k31, _r37); _sum3 = vmlaq_f32(_sum3, _k32, _r38); _sum3 = vmlaq_f32(_sum3, _k33, _r39); 
_sum3 = vmlaq_f32(_sum3, _k34, _r310); float32x4_t _r40 = vld1q_f32(r4); float32x4_t _r41 = vld1q_f32(r4 + 4); float32x4_t _r42 = vld1q_f32(r4 + 8); float32x4_t _r43 = vld1q_f32(r4 + 12); float32x4_t _r44 = vld1q_f32(r4 + 16); float32x4_t _r45 = vld1q_f32(r4 + 20); float32x4_t _r46 = vld1q_f32(r4 + 24); float32x4_t _r47 = vld1q_f32(r4 + 28); float32x4_t _r48 = vld1q_f32(r4 + 32); float32x4_t _r49 = vld1q_f32(r4 + 36); float32x4_t _r410 = vld1q_f32(r4 + 40); float32x4_t _k40 = vld1q_f32(k0); float32x4_t _k41 = vld1q_f32(k0 + 4); float32x4_t _k42 = vld1q_f32(k0 + 8); float32x4_t _k43 = vld1q_f32(k0 + 12); float32x4_t _k44 = vld1q_f32(k0 + 16); k0 -= 80; _sum0 = vmlaq_f32(_sum0, _k40, _r40); _sum0 = vmlaq_f32(_sum0, _k41, _r41); _sum0 = vmlaq_f32(_sum0, _k42, _r42); _sum0 = vmlaq_f32(_sum0, _k43, _r43); _sum0 = vmlaq_f32(_sum0, _k44, _r44); _sum1 = vmlaq_f32(_sum1, _k40, _r42); _sum1 = vmlaq_f32(_sum1, _k41, _r43); _sum1 = vmlaq_f32(_sum1, _k42, _r44); _sum1 = vmlaq_f32(_sum1, _k43, _r45); _sum1 = vmlaq_f32(_sum1, _k44, _r46); _sum2 = vmlaq_f32(_sum2, _k40, _r44); _sum2 = vmlaq_f32(_sum2, _k41, _r45); _sum2 = vmlaq_f32(_sum2, _k42, _r46); _sum2 = vmlaq_f32(_sum2, _k43, _r47); _sum2 = vmlaq_f32(_sum2, _k44, _r48); _sum3 = vmlaq_f32(_sum3, _k40, _r46); _sum3 = vmlaq_f32(_sum3, _k41, _r47); _sum3 = vmlaq_f32(_sum3, _k42, _r48); _sum3 = vmlaq_f32(_sum3, _k43, _r49); _sum3 = vmlaq_f32(_sum3, _k44, _r410); vst1q_f32(outptr0, _sum0); vst1q_f32(outptr0 + 4, _sum1); vst1q_f32(outptr0 + 8, _sum2); vst1q_f32(outptr0 + 12, _sum3); r0 += 8 * 4; r1 += 8 * 4; r2 += 8 * 4; r3 += 8 * 4; r4 += 8 * 4; outptr0 += 16; } for (; j + 1 < outw; j += 2) { float32x4_t _sum0 = _bias0; float32x4_t _sum1 = _bias0; float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _r05 = vld1q_f32(r0 + 20); float32x4_t _r06 = vld1q_f32(r0 + 24); float32x4_t _k00 = 
vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0 + 4); float32x4_t _k02 = vld1q_f32(k0 + 8); float32x4_t _k03 = vld1q_f32(k0 + 12); float32x4_t _k04 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k00, _r00); _sum0 = vmlaq_f32(_sum0, _k01, _r01); _sum0 = vmlaq_f32(_sum0, _k02, _r02); _sum0 = vmlaq_f32(_sum0, _k03, _r03); _sum0 = vmlaq_f32(_sum0, _k04, _r04); _sum1 = vmlaq_f32(_sum1, _k00, _r02); _sum1 = vmlaq_f32(_sum1, _k01, _r03); _sum1 = vmlaq_f32(_sum1, _k02, _r04); _sum1 = vmlaq_f32(_sum1, _k03, _r05); _sum1 = vmlaq_f32(_sum1, _k04, _r06); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); float32x4_t _r13 = vld1q_f32(r1 + 12); float32x4_t _r14 = vld1q_f32(r1 + 16); float32x4_t _r15 = vld1q_f32(r1 + 20); float32x4_t _r16 = vld1q_f32(r1 + 24); float32x4_t _k10 = vld1q_f32(k0); float32x4_t _k11 = vld1q_f32(k0 + 4); float32x4_t _k12 = vld1q_f32(k0 + 8); float32x4_t _k13 = vld1q_f32(k0 + 12); float32x4_t _k14 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k10, _r10); _sum0 = vmlaq_f32(_sum0, _k11, _r11); _sum0 = vmlaq_f32(_sum0, _k12, _r12); _sum0 = vmlaq_f32(_sum0, _k13, _r13); _sum0 = vmlaq_f32(_sum0, _k14, _r14); _sum1 = vmlaq_f32(_sum1, _k10, _r12); _sum1 = vmlaq_f32(_sum1, _k11, _r13); _sum1 = vmlaq_f32(_sum1, _k12, _r14); _sum1 = vmlaq_f32(_sum1, _k13, _r15); _sum1 = vmlaq_f32(_sum1, _k14, _r16); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); float32x4_t _r23 = vld1q_f32(r2 + 12); float32x4_t _r24 = vld1q_f32(r2 + 16); float32x4_t _r25 = vld1q_f32(r2 + 20); float32x4_t _r26 = vld1q_f32(r2 + 24); float32x4_t _k20 = vld1q_f32(k0); float32x4_t _k21 = vld1q_f32(k0 + 4); float32x4_t _k22 = vld1q_f32(k0 + 8); float32x4_t _k23 = vld1q_f32(k0 + 12); float32x4_t _k24 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k20, _r20); _sum0 = vmlaq_f32(_sum0, _k21, _r21); _sum0 = vmlaq_f32(_sum0, _k22, _r22); _sum0 = 
vmlaq_f32(_sum0, _k23, _r23); _sum0 = vmlaq_f32(_sum0, _k24, _r24); _sum1 = vmlaq_f32(_sum1, _k20, _r22); _sum1 = vmlaq_f32(_sum1, _k21, _r23); _sum1 = vmlaq_f32(_sum1, _k22, _r24); _sum1 = vmlaq_f32(_sum1, _k23, _r25); _sum1 = vmlaq_f32(_sum1, _k24, _r26); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r31 = vld1q_f32(r3 + 4); float32x4_t _r32 = vld1q_f32(r3 + 8); float32x4_t _r33 = vld1q_f32(r3 + 12); float32x4_t _r34 = vld1q_f32(r3 + 16); float32x4_t _r35 = vld1q_f32(r3 + 20); float32x4_t _r36 = vld1q_f32(r3 + 24); float32x4_t _k30 = vld1q_f32(k0); float32x4_t _k31 = vld1q_f32(k0 + 4); float32x4_t _k32 = vld1q_f32(k0 + 8); float32x4_t _k33 = vld1q_f32(k0 + 12); float32x4_t _k34 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k30, _r30); _sum0 = vmlaq_f32(_sum0, _k31, _r31); _sum0 = vmlaq_f32(_sum0, _k32, _r32); _sum0 = vmlaq_f32(_sum0, _k33, _r33); _sum0 = vmlaq_f32(_sum0, _k34, _r34); _sum1 = vmlaq_f32(_sum1, _k30, _r32); _sum1 = vmlaq_f32(_sum1, _k31, _r33); _sum1 = vmlaq_f32(_sum1, _k32, _r34); _sum1 = vmlaq_f32(_sum1, _k33, _r35); _sum1 = vmlaq_f32(_sum1, _k34, _r36); float32x4_t _r40 = vld1q_f32(r4); float32x4_t _r41 = vld1q_f32(r4 + 4); float32x4_t _r42 = vld1q_f32(r4 + 8); float32x4_t _r43 = vld1q_f32(r4 + 12); float32x4_t _r44 = vld1q_f32(r4 + 16); float32x4_t _r45 = vld1q_f32(r4 + 20); float32x4_t _r46 = vld1q_f32(r4 + 24); float32x4_t _k40 = vld1q_f32(k0); float32x4_t _k41 = vld1q_f32(k0 + 4); float32x4_t _k42 = vld1q_f32(k0 + 8); float32x4_t _k43 = vld1q_f32(k0 + 12); float32x4_t _k44 = vld1q_f32(k0 + 16); k0 -= 80; _sum0 = vmlaq_f32(_sum0, _k40, _r40); _sum0 = vmlaq_f32(_sum0, _k41, _r41); _sum0 = vmlaq_f32(_sum0, _k42, _r42); _sum0 = vmlaq_f32(_sum0, _k43, _r43); _sum0 = vmlaq_f32(_sum0, _k44, _r44); _sum1 = vmlaq_f32(_sum1, _k40, _r42); _sum1 = vmlaq_f32(_sum1, _k41, _r43); _sum1 = vmlaq_f32(_sum1, _k42, _r44); _sum1 = vmlaq_f32(_sum1, _k43, _r45); _sum1 = vmlaq_f32(_sum1, _k44, _r46); vst1q_f32(outptr0, _sum0); vst1q_f32(outptr0 + 4, 
_sum1); r0 += 4 * 4; r1 += 4 * 4; r2 += 4 * 4; r3 += 4 * 4; r4 += 4 * 4; outptr0 += 8; } for (; j < outw; j++) { float32x4_t _sum0 = _bias0; float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0 + 4); float32x4_t _k02 = vld1q_f32(k0 + 8); float32x4_t _k03 = vld1q_f32(k0 + 12); float32x4_t _k04 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k00, _r00); _sum0 = vmlaq_f32(_sum0, _k01, _r01); _sum0 = vmlaq_f32(_sum0, _k02, _r02); _sum0 = vmlaq_f32(_sum0, _k03, _r03); _sum0 = vmlaq_f32(_sum0, _k04, _r04); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); float32x4_t _r13 = vld1q_f32(r1 + 12); float32x4_t _r14 = vld1q_f32(r1 + 16); float32x4_t _k10 = vld1q_f32(k0); float32x4_t _k11 = vld1q_f32(k0 + 4); float32x4_t _k12 = vld1q_f32(k0 + 8); float32x4_t _k13 = vld1q_f32(k0 + 12); float32x4_t _k14 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k10, _r10); _sum0 = vmlaq_f32(_sum0, _k11, _r11); _sum0 = vmlaq_f32(_sum0, _k12, _r12); _sum0 = vmlaq_f32(_sum0, _k13, _r13); _sum0 = vmlaq_f32(_sum0, _k14, _r14); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); float32x4_t _r23 = vld1q_f32(r2 + 12); float32x4_t _r24 = vld1q_f32(r2 + 16); float32x4_t _k20 = vld1q_f32(k0); float32x4_t _k21 = vld1q_f32(k0 + 4); float32x4_t _k22 = vld1q_f32(k0 + 8); float32x4_t _k23 = vld1q_f32(k0 + 12); float32x4_t _k24 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k20, _r20); _sum0 = vmlaq_f32(_sum0, _k21, _r21); _sum0 = vmlaq_f32(_sum0, _k22, _r22); _sum0 = vmlaq_f32(_sum0, _k23, _r23); _sum0 = vmlaq_f32(_sum0, _k24, _r24); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r31 = vld1q_f32(r3 + 4); float32x4_t _r32 = vld1q_f32(r3 + 8); float32x4_t _r33 = 
vld1q_f32(r3 + 12); float32x4_t _r34 = vld1q_f32(r3 + 16); float32x4_t _k30 = vld1q_f32(k0); float32x4_t _k31 = vld1q_f32(k0 + 4); float32x4_t _k32 = vld1q_f32(k0 + 8); float32x4_t _k33 = vld1q_f32(k0 + 12); float32x4_t _k34 = vld1q_f32(k0 + 16); k0 += 20; _sum0 = vmlaq_f32(_sum0, _k30, _r30); _sum0 = vmlaq_f32(_sum0, _k31, _r31); _sum0 = vmlaq_f32(_sum0, _k32, _r32); _sum0 = vmlaq_f32(_sum0, _k33, _r33); _sum0 = vmlaq_f32(_sum0, _k34, _r34); float32x4_t _r40 = vld1q_f32(r4); float32x4_t _r41 = vld1q_f32(r4 + 4); float32x4_t _r42 = vld1q_f32(r4 + 8); float32x4_t _r43 = vld1q_f32(r4 + 12); float32x4_t _r44 = vld1q_f32(r4 + 16); float32x4_t _k40 = vld1q_f32(k0); float32x4_t _k41 = vld1q_f32(k0 + 4); float32x4_t _k42 = vld1q_f32(k0 + 8); float32x4_t _k43 = vld1q_f32(k0 + 12); float32x4_t _k44 = vld1q_f32(k0 + 16); k0 -= 80; _sum0 = vmlaq_f32(_sum0, _k40, _r40); _sum0 = vmlaq_f32(_sum0, _k41, _r41); _sum0 = vmlaq_f32(_sum0, _k42, _r42); _sum0 = vmlaq_f32(_sum0, _k43, _r43); _sum0 = vmlaq_f32(_sum0, _k44, _r44); vst1q_f32(outptr0, _sum0); r0 += 2 * 4; r1 += 2 * 4; r2 += 2 * 4; r3 += 2 * 4; r4 += 2 * 4; outptr0 += 4; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; r4 += tailstep; } } }
utils.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>

/* Print an n-by-n matrix: each line shows the row's base address followed
 * by its n elements. */
void print_mat(int n, int** mat)
{
    int i, j;
    for (i = 0; i < n; i++) {
        /* was printf("%X: ", mat[i]): passing a pointer for %X is undefined
         * behavior; %p with a (void *) cast is the portable form */
        printf("%p: ", (void *)mat[i]);
        for (j = 0; j < n; j++) {
            printf("%d ", mat[i][j]);
        }
        printf("\n");
    }
    printf("\n");
}

/* Release an n-row matrix allocated row by row. */
void free_mat(int n, int** mat)
{
    int i;
    for (i = 0; i < n; i++) {
        free(mat[i]);
    }
    free(mat);
}

/* Release a vector; n is unused but kept for interface compatibility. */
void free_vec(int n, int* mat)
{
    (void)n;
    free(mat);
}

/* Allocate an n-by-n matrix filled with pseudo-random values in
 * [0, max_val) (all zeros when max_val == 0, matching make_rand_vect).
 * Returns NULL if the row-pointer array cannot be allocated.
 *
 * Each row uses a private LCG stream seeded from time+row: rand() keeps
 * hidden shared state, so calling it from inside the parallel region (as
 * the original did) is a data race. The original also opened a second,
 * nested "#pragma omp parallel for" for the inner loop; a single region
 * over the rows is both correct and faster. */
int** make_rand_mat(int n, int max_val)
{
    double begin, end;
    int i, j;
    unsigned base_seed;
    int** mat = malloc(sizeof(int*) * n);
    if (mat == NULL)
        return NULL;

    begin = omp_get_wtime();
    base_seed = (unsigned)time(NULL); /* seed derived from current time */

    #pragma omp parallel for private(i,j) firstprivate(n)
    for (i = 0; i < n; i++) {
        unsigned seed = base_seed ^ (unsigned)(i + 1);
        mat[i] = malloc(sizeof(int) * n);
        if (mat[i] == NULL)
            continue; /* row left NULL; original performed no rollback either */
        for (j = 0; j < n; j++) {
            seed = seed * 1103515245u + 12345u; /* classic LCG step */
            /* guard max_val == 0: the original computed rand() % 0 (UB) */
            mat[i][j] = (max_val > 0)
                      ? (int)((seed >> 16) % (unsigned)max_val)
                      : 0;
        }
    }
    end = omp_get_wtime();
    printf("matrix initialization with random numbers took %lf seconds\n", end - begin);
    return mat;
}

/* Allocate an n-by-n matrix of zeros (rows via calloc, filled in parallel).
 * Returns NULL if the row-pointer array cannot be allocated. */
int** make_zero_mat(int n)
{
    double begin, end;
    int i;
    int** mat = malloc(sizeof(int*) * n);
    if (mat == NULL)
        return NULL;

    begin = omp_get_wtime();
    /* (the original seeded rand() here although no random numbers are drawn) */
    #pragma omp parallel for private(i) firstprivate(n)
    for (i = 0; i < n; i++) {
        mat[i] = calloc(n, sizeof(int));
    }
    end = omp_get_wtime();
    printf("matrix initialization with zeros took %lf seconds\n", end - begin);
    return mat;
}

/* Compare two length-n vectors. Returns 0 when equal; otherwise returns 1
 * and stores the first mismatching index in *bad_i. */
int compare_pat(int n, int* bad_i, int* mat1, int* mat2)
{
    int i;
    for (i = 0; i < n; i++) {
        /* direct != comparison: the original's mat1[i] - mat2[i] can
         * overflow (UB) and on wraparound to 0 would miss a difference */
        if (mat1[i] != mat2[i]) {
            *bad_i = i;
            return 1;
        }
    }
    return 0;
}

/* Allocate a length-n vector of random values in [0, max_val), or all
 * zeros when max_val == 0. Returns NULL on allocation failure. */
int* make_rand_vect(int n, int max_val)
{
    int i;
    int* arr = malloc(sizeof(int) * n); /* no cast needed in C */
    if (arr == NULL)
        return NULL;

    if (max_val == 0) {
        for (i = 0; i < n; i++)
            arr[i] = 0;
    } else {
        srand(time(NULL));
        for (i = 0; i < n; i++)
            arr[i] = rand() % max_val;
    }
    return arr;
}
cram-md5_fmt_plug.c
/* Cracker for CRAM-MD5 challenge-response authentication mechanism.
 * Hacked together during November of 2012 by Dhiru Kholia <dhiru at openwall.com>.
 *
 * See http://susam.in/blog/auth-cram-md5/ and
 *
 * http://www.openwall.com/lists/john-users/2010/07/27/1
 *
 * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted. */

#if FMT_EXTERNS_H
extern struct fmt_main cram_md5_fmt;
#elif FMT_REGISTERS_H
john_register_one(&cram_md5_fmt);
#else

#include <string.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include <openssl/bio.h>
#include <openssl/buffer.h>
#include <openssl/evp.h>
#include <openssl/md5.h>
#include <openssl/hmac.h>
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#define OMP_SCALE 64
#endif

#define FORMAT_LABEL "cram_md5"
#define FORMAT_NAME "CRAM-MD5"
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 16
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_LENGTH 256
#define USER_LENGTH 256
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1

static struct fmt_tests cram_md5_tests[] = {
	/* Challenge ==> PDE3ODkzLjEzMjA2NzkxMjNAdGVzc2VyYWN0LnN1c2FtLmluPg==
	 * Response ==> YWxpY2UgNjRiMmE0M2MxZjZlZDY4MDZhOTgwOTE0ZTIzZTc1ZjA= */
	{"$cram_md5$PDE3ODkzLjEzMjA2NzkxMjNAdGVzc2VyYWN0LnN1c2FtLmluPg==$YWxpY2UgNjRiMmE0M2MxZjZlZDY4MDZhOTgwOTE0ZTIzZTc1ZjA=", "wonderland"},
	{NULL}
};

/* Candidate passwords and their HMAC-MD5 digests, one slot per key index. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];

static struct custom_salt {
	unsigned char salt[SALT_LENGTH];     /* decoded challenge (HMAC message) */
	int saltlen;
	unsigned char username[USER_LENGTH]; /* user name from the decoded response */
} *cur_salt;

/* One-time setup: scale the per-crypt key counts for OpenMP and allocate
 * the key/result arrays. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_NONE);
	crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}

/* Base64-decode input into output via OpenSSL's BIO filter chain.
 * The BIO base64 filter wants a trailing newline, hence the in-place append.
 * NOTE(review): this writes input[length] and input[length + 1], so the
 * caller's buffer needs at least two spare bytes past the string; a pinput
 * filled to its full SALT_LENGTH by strncpy would overrun — verify callers.
 * NOTE(review): the decoded length is not bounded by the output buffer;
 * callers must size output for the worst-case decode. */
static void base64_decode_good(char *input, char *output)
{
	BIO *out_buffer, *buffer;
	int length = strlen(input);

	/* dirty hack */
	input[length] = '\n';
	input[length + 1] = 0;
	length += 1;
	out_buffer = BIO_new(BIO_f_base64());
	buffer = BIO_new_mem_buf(input, length);
	buffer = BIO_push(out_buffer, buffer);
	BIO_read(buffer, output, length);
	BIO_free_all(buffer);
}

/* Accept any ciphertext carrying the "$cram_md5$" tag (no structural
 * validation beyond the prefix). */
static int valid(char *ciphertext, struct fmt_main *self)
{
	if (strncmp(ciphertext, "$cram_md5$", 10) != 0)
		return 0;
	return 1;
}

/* Parse "$cram_md5$<b64 challenge>$<b64 response>" into a custom_salt:
 * the decoded challenge plus the user name taken from the response
 * ("user hexdigest" — everything before the last space).
 * NOTE(review): strtok is not reentrant and the result is a static
 * buffer, matching the single-threaded salt-loading phase. */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char pinput[SALT_LENGTH] = { 0 };
	char *p;
	static struct custom_salt cs;
	memset(cs.salt, 0, SALT_LENGTH);
	ctcopy += 10;                           /* skip over "$cram_md5$" */
	p = strtok(ctcopy, "$");
	strncpy(pinput, p, SALT_LENGTH);
	base64_decode_good(pinput, (char*)cs.salt);
	cs.saltlen = strlen((char*)cs.salt);
	p = strtok(NULL, "$");
	strncpy(pinput, p, SALT_LENGTH);
	base64_decode_good(pinput, (char*)cs.username);
	p = strrchr((char*)cs.username, ' '); /* find username - hash delimiter */
	*p = 0;
	MEM_FREE(keeptr);
	return (void *)&cs;
}

/* Extract the 16-byte binary digest: base64-decode the response, take the
 * hex string after the last space, and unhex it.
 * NOTE(review): the decode target `out` is only BINARY_SIZE (16) bytes but
 * the decoded response ("user " + 32 hex chars) is longer — this relies on
 * the adjacent union/static storage absorbing the spill; worth confirming. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char pinput[256] = { 0 };
	char *p;
	int i;
	p = strrchr(ciphertext, '$') + 1;
	strncpy(pinput, p, SALT_LENGTH);
	base64_decode_good(pinput, (char*)out);
	p = strrchr((char*)out, ' ') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

/* Standard john hash-table probes: low 4..27 bits of the first digest word. */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* Compute HMAC-MD5(key = candidate password, message = challenge) for every
 * queued candidate; digests land in crypt_out. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	{
		unsigned char *result;
		result = HMAC(EVP_md5(), saved_key[index], strlen(saved_key[index]), cur_salt->salt, cur_salt->saltlen, NULL, NULL);
		memcpy(crypt_out[index], result, 16);
	}
	return count;
}

/* Without OpenMP only index 0 is checked — MAX_KEYS_PER_CRYPT is 1 in
 * that build, so a loop is unnecessary. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], BINARY_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* Full binary already compared in cmp_one; nothing further to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Store a candidate password (truncated to PLAINTEXT_LENGTH). */
static void cram_md5_set_key(char *key, int index)
{
	int saved_key_length = strlen(key);
	if (saved_key_length > PLAINTEXT_LENGTH)
		saved_key_length = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_key_length);
	saved_key[index][saved_key_length] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Format descriptor wiring the callbacks above into john's framework. */
struct fmt_main cram_md5_fmt = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		DEFAULT_ALIGN,
		SALT_SIZE,
		DEFAULT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		cram_md5_tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		set_salt,
		cram_md5_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
tri_template.c
//------------------------------------------------------------------------------ // tri_template: count triangles in a graph, outer-product method //------------------------------------------------------------------------------ // Compute the # of triangles in a graph, C<A>=A*A in GraphBLAS notation, then // ntri=sum(C). Or, in MATLAB notation, ntri = sum (sum ((A*A).*A)). C=A*A is // computed using an outer-product matrix multiplication. C is not computed // explicitly, but its entries are summed up in the scalar ntri. // A is a binary matrix stored in compressed sparse column form. Its values // are not stored. If A(i,j) is in the pattern, its value is assumed to be 1. // The pattern of column j is in Ai [Ap [j]..Ap[j+1]]. Row indices in the // matrix A must be sorted. Ap[0]=0, and Ap [n] = total number of entries in // the matrix. Ap is of size n+1. // When this function is called, A is a triangular matrix (with no diagonal // entries, or a symmetric permutation of such a triangular matrix. However, // this function works on any matrix. It just computes sum(sum((A*A).*A) in // MATLAB notation, or C<A>=A*A where A is binary, followed by reduce(C), to // scalar. // So it can be used with C<L>=L*L or C<U>=U*U, and ntri is the number of // triangles. It can also be used as C<A>=A*A where A is symmetric, in // which case the # of triangles is ntri/6 (Burkhardt's method). // This file creates eight methods via compile-time definitions: // // BIT: if defined, Mark is a bit vector of size n. Otherwise it is a // bool array of size n. This can help cut workspace if many // threads are used since each thread needs its own Mark array. // PARALLEL: if defined, then OpenMP is used // LOGSEARCH: if binary search is used to reduce the work // Compare this code with tri_simple.c. That code is a simple version of this // algorithm, with the bare essential features. 
// Mark-array abstraction: with BIT defined, Mark is a packed bit vector
// (one bit per row index, 1+n/8 bytes); otherwise one bool per row.
// CLEAR_MARK in the BIT case zeroes the whole byte holding bit i; that is
// safe because clearing only happens over the same index set that was set.
#ifdef BIT
#define MARK_TYPE uint8_t
#define MARK_SIZE (1 + n/8)
#define SET_MARK(i)   { Index t=(i) ; Mark [t/8] |= (1 << (t%8)) ; }
#define CLEAR_MARK(i) { Mark [(i)/8] = 0 ; }
#define COUNT_MARK(i) { Index t=(i) ; if (Mark [t/8] & (1 << t%8)) ntri++ ; }
#else
#define MARK_TYPE bool
#define MARK_SIZE n
#define SET_MARK(i)   { Mark [i] = 1 ; }
#define CLEAR_MARK(i) { Mark [i] = 0 ; }
#define COUNT_MARK(i) { ntri += Mark [i] ; }
#endif

// Select one of the eight function names from the BIT / PARALLEL /
// LOGSEARCH compile-time switches.
#ifdef LOGSEARCH
#ifdef PARALLEL
#ifdef BIT
#define TRI_FUNCTION tri_logbit_parallel
#else
#define TRI_FUNCTION tri_logmark_parallel
#endif
#else
#ifdef BIT
#define TRI_FUNCTION tri_logbit
#else
#define TRI_FUNCTION tri_logmark
#endif
#endif
#else
#ifdef PARALLEL
#ifdef BIT
#define TRI_FUNCTION tri_bit_parallel
#else
#define TRI_FUNCTION tri_mark_parallel
#endif
#else
#ifdef BIT
#define TRI_FUNCTION tri_bit
#else
#define TRI_FUNCTION tri_mark
#endif
#endif
#endif

//------------------------------------------------------------------------------
// tri_* function: count the triangles in a graph
//------------------------------------------------------------------------------

int64_t TRI_FUNCTION        // # of triangles, or -1 if out of memory
(
    const int64_t *restrict Ap,     // column pointers, size n+1
    const Index *restrict Ai,       // row indices, size nz = Ap [n]
    const Index n                   // A is n-by-n
    #ifdef PARALLEL
    , const int threads             // # of threads
    , const Index chunk             // scheduler chunk size
    #endif
)
{

    int64_t ntri = 0 ;      // # of triangles
    bool ok = true ;        // false if any thread ran out of memory

    //--------------------------------------------------------------------------
    // check if sequential version of same algorithm should be used
    //--------------------------------------------------------------------------

    // small or single-thread problems fall through to the sequential
    // variant with the same BIT/LOGSEARCH configuration
    #ifdef PARALLEL
    if (n < chunk || threads < 2)
    {
        #ifdef LOGSEARCH
        #ifdef BIT
        return (tri_logbit (Ap, Ai, n)) ;
        #else
        return (tri_logmark (Ap, Ai, n)) ;
        #endif
        #else
        #ifdef BIT
        return (tri_bit (Ap, Ai, n)) ;
        #else
        return (tri_mark (Ap, Ai, n)) ;
        #endif
        #endif
    }
    #endif

    //--------------------------------------------------------------------------
    // parallel and sequential triangle counting, outer-product method
    //--------------------------------------------------------------------------

    // ntri is summed across threads; ok is AND-ed so any allocation
    // failure in any thread poisons the result
    #ifdef PARALLEL
    #pragma omp parallel num_threads(threads) reduction(+:ntri) reduction(&&:ok)
    #endif
    {

        //----------------------------------------------------------------------
        // get workspace
        //----------------------------------------------------------------------

        // each thread needs its own private workspace, Mark [0..n-1] = 0
        MARK_TYPE *restrict Mark = calloc (MARK_SIZE, sizeof (MARK_TYPE)) ;
        if (Mark == NULL)
        {
            ok = false ;
        }
        else
        {

            //------------------------------------------------------------------
            // count triangles in each column C(:,j)
            //------------------------------------------------------------------

            #ifdef PARALLEL
            #pragma omp for schedule(dynamic,chunk)
            #endif
            for (Index j = 0 ; j < n ; j++)
            {

                //--------------------------------------------------------------
                // get column j of A
                //--------------------------------------------------------------

                // A(:,j) has row indices in range jlo..jhi
                Index jlo, jhi ;
                if (!tri_lohi (Ap, Ai, j, &jlo, &jhi)) continue ;
                // Mark is scattered lazily: only once a column k actually
                // needs the linear-search path
                bool marked = false ;
                #ifdef LOGSEARCH
                Index ljnz = jhi - jlo + 1 ;
                #endif

                //--------------------------------------------------------------
                // compute sum(C(:,j)) where C=(A*A(:,j))*.(A(:,j))
                //--------------------------------------------------------------

                for (int64_t p = Ap [j] ; p < Ap [j+1] ; p++)
                {

                    //----------------------------------------------------------
                    // A(k,j) is present, compute C(:,j) += A(:,j)*A(k,j)
                    //----------------------------------------------------------

                    const Index k = Ai [p] ;

                    // A(:,k) has row indices in range klo..khi
                    Index klo, khi ;
                    if (!tri_lohi (Ap, Ai, k, &klo, &khi)) continue ;

                    // skip if A(:,j) and A(:,k) do not overlap
                    if (khi < jlo || klo > jhi) continue ;

                    //----------------------------------------------------------
                    // binary search if A(:,k) has many nonzeros
                    //----------------------------------------------------------

                    #ifdef LOGSEARCH
                    // find the intersection between the mask, A(:,j),
                    // and the column A(:,k)
                    Index lknz = khi - klo + 1 ;
                    if (512 * ljnz < lknz)  // (4 * ljnz * log2 (lknz) < lknz)
                    {

                        //------------------------------------------------------
                        // A (:,j) is very sparse compared with A (:,k) ;
                        //------------------------------------------------------

                        // Do not use the Mark array at all, but use binary
                        // search instead.  time is O(ljnz * log (lknz))
                        // (the inner loop variable p deliberately shadows the
                        // outer p; the branch ends with continue)
                        int64_t pleft = Ap [k] ;
                        for (int64_t p = Ap [j] ; p < Ap [j+1] ; p++)
                        {
                            // find i in A (:,k)
                            Index i = Ai [p] ;
                            // binary search of Ai [pleft ... pright] for i
                            int64_t pright = Ap [k+1] - 1 ;
                            while (pleft < pright)
                            {
                                int64_t pmiddle = (pleft + pright) / 2 ;
                                if (i > Ai [pmiddle])
                                {
                                    // if in the list, it appears in
                                    // [pmiddle+1..pright]
                                    pleft = pmiddle + 1 ;
                                }
                                else
                                {
                                    // if in the list, it appears in
                                    // [pleft..pmiddle]
                                    pright = pmiddle ;
                                }
                            }
                            if (pleft == pright && Ai [pleft] == i)
                            {
                                // found it: A(i,k) and A (k,j) both nonzero
                                // C(i,j) += A (i,k) * A (k,j)
                                ntri++ ;
                            }
                        }
                        continue ;
                    }
                    #endif

                    //----------------------------------------------------------
                    // linear search
                    //----------------------------------------------------------

                    if (!marked)
                    {
                        // scatter A(:,j) into Mark
                        for (int64_t p = Ap [j] ; p < Ap [j+1] ; p++)
                        {
                            // Mark [Ai [p]] = 1 ;
                            SET_MARK (Ai [p]) ;
                        }
                        marked = true ;
                    }
                    for (int64_t pa = Ap [k] ; pa < Ap [k+1] ; pa++)
                    {
                        // C(i,j) += A (i,k) * A (k,j)
                        COUNT_MARK (Ai [pa]) ;
                    }
                }

                //--------------------------------------------------------------
                // clear the Mark array
                //--------------------------------------------------------------

                if (marked)
                {
                    for (int64_t p = Ap [j] ; p < Ap [j+1] ; p++)
                    {
                        // Mark [Ai [p]] = 0 ;
                        CLEAR_MARK (Ai [p]) ;
                    }
                }
            }

            //------------------------------------------------------------------
            // free workspace
            //------------------------------------------------------------------

            free (Mark) ;
        }
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    return (ok ? ntri : -1) ;
}

// undefine the configuration so the template can be re-included
#undef BIT
#undef PARALLEL
#undef MARK_TYPE
#undef MARK_SIZE
#undef SET_MARK
#undef CLEAR_MARK
#undef COUNT_MARK
#undef TRI_FUNCTION
#undef LOGSEARCH
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 32; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < 
Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
VolumetricConvolutionMM.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "THNN/generic/VolumetricConvolutionMM.c"
#else

#include <ATen/div_rtn.h>

/* Minimum batch size before the per-frame loops go parallel. */
#define CONV3D_OMP_THRESHOLD 20

/* Validates the geometry of a 3D convolution: input must be a non-empty
 * 4D (CDHW) or 5D (NCDHW) tensor, kernel and stride must be positive, the
 * padded input must be at least as large as the kernel, and the computed
 * output must be non-empty. Also cross-checks gradOutput/weight/bias sizes
 * when supplied. Errors are raised through THError/THArgCheck. */
static void inline THNN_(VolumetricConvolutionMM_shapeCheck)(
                         THNNState *state,
                         THTensor *input,
                         THTensor *gradOutput,
                         THTensor *weight,
                         THTensor *bias,
                         int kT, int kW, int kH,
                         int dT, int dW, int dH,
                         int pT, int pW, int pH,
                         int weight_nullable)
{
  THNN_ARGCHECK(!input->is_empty() && (input->dim() == 4 || input->dim() == 5), 2, input,
                "non-empty 4D or 5D (batch mode) tensor expected for input, but got: %s");
  THArgCheck(kT > 0 && kW > 0 && kH > 0, 8,
             "kernel size should be greater than zero, but got kT: %d kH: %d kW: %d", kT, kH, kW);
  THArgCheck(dT > 0 && dW > 0 && dH > 0, 11,
             "stride should be greater than zero, but got dT: %d dH: %d dW: %d", dT, dH, dW);

  if (weight != NULL) {
    /* weight is either 5D (out, in, kT, kH, kW) or already flattened 2D. */
    THNN_ARGCHECK(!weight->is_empty() && (weight->dim() == 2 || weight->dim() == 5), 5, weight,
                  "non-empty 2D or 5D weight tensor expected, but got: %s");
    if (bias != NULL) {
      THNN_CHECK_DIM_SIZE(bias, 1, 0, weight->size(0));
    }
  } else if (!weight_nullable) {
    THError("weight tensor is expected to be non-nullable");
  }

  int ndim = input->dim();
  int dimf = 0;  /* feature (channel) dimension */
  int dimt = 1;  /* depth/time */
  int dimh = 2;  /* height */
  int dimw = 3;  /* width */

  /* 5D means a leading batch dimension: shift all indices by one. */
  if (ndim == 5)
  {
    dimf++;
    dimt++;
    dimh++;
    dimw++;
  }

  int64_t inputDepth;
  int64_t inputHeight;
  int64_t inputWidth;

  int64_t exactInputDepth;
  int64_t exactInputHeight;
  int64_t exactInputWidth;
  int64_t outputDepth;
  int64_t outputHeight;
  int64_t outputWidth;

  inputDepth = input->size(dimt);
  inputHeight = input->size(dimh);
  inputWidth = input->size(dimw);

  /* Sizes after zero-padding on both sides. */
  exactInputDepth = inputDepth + 2*pT;
  exactInputHeight = inputHeight + 2*pH;
  exactInputWidth = inputWidth + 2*pW;

  if (exactInputDepth < kT || exactInputHeight < kH || exactInputWidth < kW) {
    THError("Calculated padded input size per channel: (%ld x %ld x %ld). "
      "Kernel size: (%d x %d x %d). Kernel size can't be greater than actual input size",
      exactInputDepth, exactInputHeight, exactInputWidth, kT, kH, kW);
  }

  /* div_rtn rounds toward negative infinity, matching conv output-size math. */
  outputDepth  = div_rtn<int64_t>(exactInputDepth - kT, dT) + 1;
  outputHeight = div_rtn<int64_t>(exactInputHeight - kH, dH) + 1;
  outputWidth  = div_rtn<int64_t>(exactInputWidth - kW, dW) + 1;

  if (outputDepth < 1 || outputWidth < 1 || outputHeight < 1) {
    THError("Given input size per channel: (%ld x %ld x %ld). "
      "Calculated output size per channel: (%ld x %ld x %ld). Output size is too small",
      inputDepth, inputHeight, inputWidth, outputDepth, outputHeight, outputWidth);
  }

  if (weight != NULL) {
    int64_t nInputPlane = weight->size(1);
    /* For 2D weight, size(1) is nInputPlane*kT*kH*kW; undo the flatten. */
    if (weight->dim() == 2) {
      nInputPlane /= (kT * kH * kW);
    }
    THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane);
  }

  if (gradOutput != NULL) {
    if (weight != NULL) {
      int64_t nOutputPlane = weight->size(0);
      THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
    } else if (bias != NULL) {
      int64_t nOutputPlane = THTensor_sizeLegacyNoScalars(bias, 0);
      THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
    }
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimt, outputDepth);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth);
  }
}

/* Returns a contiguous view of `weight`; 5D weights are flattened into a
 * 2D (nOutputPlane x nInputPlane*kT*kH*kW) view sharing the same storage.
 * The caller owns the returned tensor and must decref it. */
static THTensor* THNN_(newViewWeight)(THTensor *weight)
{
  weight = THTensor_(newContiguous)(weight);
  if (weight->dim() == 5) {
    int64_t s1 = weight->size(0);
    int64_t s2 = weight->size(1) * weight->size(2) * weight->size(3) * weight->size(4);
    THTensor *old_weight = weight;
    weight = THTensor_(newWithStorage2d)(THTensor_getStoragePtr(weight), weight->storage_offset(),
                                         s1, -1, s2, -1);
    c10::raw::intrusive_ptr::decref(old_weight);
  }
  return weight;
}

// Kernel for fast unfold+copy
// Borrowed from Theano
// Authors: Arjun Jain, Frédéric Bastien, Jan Schlüter, Nicolas Ballas

/* col2vol: accumulates the unfolded-column buffer `finput` back into the
 * volumetric tensor `input` (used for gradInput). Each input element gathers
 * every column position that covered it. Under OpenMP the flat input index
 * range [0, nInputPlane*D*H*W) is split into one contiguous segment per
 * thread, and (w,h,d,c) are decoded once from the segment start and then
 * advanced incrementally — keeping this decode in sync with the while-loop
 * tail is what makes the code delicate. */
static void THNN_(unfolded_acc_vol)(
          THTensor *finput,
          THTensor *input,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH,
          int64_t nInputPlane,
          int64_t inputDepth,
          int64_t inputWidth,
          int64_t inputHeight,
          int64_t outputDepth,
          int64_t outputWidth,
          int64_t outputHeight)
{
  scalar_t *input_data = input->data<scalar_t>();
  scalar_t *finput_data = finput->data<scalar_t>();

#ifdef _OPENMP
  /* Do not nest: skip parallelism if already inside a parallel region. */
  int inOmp = omp_in_parallel();
  #pragma omp parallel if (!inOmp) firstprivate(finput_data, input_data, outputWidth, outputHeight, outputDepth, kW, kH, kT, dW, dH, dT, pW, pH, pT, nInputPlane, inputHeight, inputWidth, inputDepth)
  {
    size_t num_threads = omp_get_num_threads();
    size_t tid = omp_get_thread_num();
    int64_t n = nInputPlane * inputHeight * inputWidth * inputDepth;
    int64_t seg_len_tmp = n / num_threads;
    int64_t line_index_offset = tid * seg_len_tmp;
    /* Last thread absorbs the remainder. */
    int64_t line_seg_len = (tid == num_threads - 1)? (n-line_index_offset) : seg_len_tmp;

    /* Decode (w, h, d, c) of this thread's first element; the +p offsets
     * shift into padded coordinates. */
    int64_t w = line_index_offset % inputWidth + pW;
    int64_t h_index = line_index_offset / inputWidth;
    int64_t h = h_index % inputHeight + pH;
    int64_t d_index = h_index / inputHeight;
    int64_t d = d_index % inputDepth + pT;
    int64_t c = d_index / inputDepth;
#else
    /* Serial fallback: one segment covering everything. */
    int64_t line_seg_len = nInputPlane * inputHeight * inputWidth * inputDepth;
    int64_t line_index_offset = 0;
    int64_t w = pW;
    int64_t h = pH;
    int64_t d = pT;
    int64_t c = 0;;  /* stray second ';' is a harmless empty statement */
#endif

    int64_t outputHW = outputHeight * outputWidth;
    int64_t outputDHW = outputDepth * outputHW;
    int64_t kHkW = kH*kW;
    int64_t kTkHkW = kT*kHkW;

    /* Per-axis deltas of the finput offset when advancing one column
     * position (each also undoes the inner axes' accumulated stride). */
    int64_t coeff_d_col = outputHW - dT * kHkW * outputDHW;
    int64_t coeff_h_col = outputWidth - dH * kW * outputDHW;
    int64_t coeff_w_col = (1 - dW * outputDHW);

    int64_t count = 0;
    while (count < line_seg_len) {
      // compute the start and end of the output
      int64_t w_col_start = (w < kW) ? 0 : (w - kW) / dW + 1;
      int64_t w_col_tmp = w / dW + 1;
      int64_t w_col_end = w_col_tmp < outputWidth? w_col_tmp : outputWidth;

      int64_t h_col_start = (h < kH) ? 0 : (h - kH) / dH + 1;
      int64_t h_col_tmp = h / dH + 1;
      int64_t h_col_end = h_col_tmp < outputHeight? h_col_tmp : outputHeight;

      int64_t d_col_start = (d < kT) ? 0 : (d - kT) / dT + 1;
      int64_t d_col_tmp = d / dT + 1;
      int64_t d_col_end = d_col_tmp < outputDepth? d_col_tmp : outputDepth;

      scalar_t val = 0;
      int64_t offset = (c * kTkHkW + d * kHkW + h * kW + w) * outputDHW;

      int64_t offset_w_col_start = w_col_start * coeff_w_col;
      int64_t offset_d_col_start = d_col_start * coeff_d_col;
      int64_t offset_h_col_start = h_col_start * coeff_h_col;
      int64_t offset_w_col = offset_w_col_start + offset;
      int64_t offset_d_col;
      int64_t offset_h_col;
      int64_t w_col, d_col, h_col;

      /* Gather every column cell that covered this input element. */
      for (w_col = w_col_start; w_col < w_col_end; ++w_col) {
        offset_d_col = offset_d_col_start + offset_w_col;
        for (d_col = d_col_start; d_col < d_col_end; ++d_col) {
          offset_h_col = offset_h_col_start + offset_d_col;
          for (h_col = h_col_start; h_col < h_col_end; ++h_col) {
            val += finput_data[offset_h_col];
            offset_h_col += coeff_h_col;
          }
          offset_d_col += coeff_d_col;
        }
        offset_w_col += coeff_w_col;
      }

      input_data[line_index_offset+count] = val;
      count++;

      /* Advance (w, h, d, c) like a nested-loop odometer. */
      if (count < line_seg_len) {
        if (w - pW + 1 == inputWidth) {
          w = pW;
          if (h - pH + 1 == inputHeight) {
            h = pH;
            if (d - pT + 1 == inputDepth) {
              d = pT;
              c++;
            }
            else d++;
          }
          else h++;
        }
        else w++;
      }
    }
#ifdef _OPENMP
  }
#endif
}

/*
  Modified from the version of CUDA implementation, but the loop iterations is larger than that one.
  The larger loop could lower the proportion of openmp overhead. And the inner part in loop is simpler.
  The naive code is below:

  scalar_t *input_data = input->data<scalar_t>();
  scalar_t *finput_data = finput->data<scalar_t>();

  int64_t n = nInputPlane*kT*kH*kW*outputDepth*outputWidth*outputHeight;
  #pragma omp parallel for firstprivate(finput_data, input_data, outputWidth, outputHeight, outputDepth, kW, kH, kT, dW, dH, dT, pW, pH, pT, inputHeight, inputWidth, inputDepth)
  for (int64_t idx = 0; idx < n ; ++idx) {
    int64_t w_out = line_index_offset % outputWidth;
    int64_t remained = line_index_offset / outputWidth;
    int64_t h_out = remained % outputHeight;
    remained /= outputHeight;
    int64_t d_out = remained % outputDepth;
    remained /= outputDepth;
    int k = remained % kW;
    remained /= kW;
    int j = remained % kH;
    remained /= kH;
    int i = remained % kT;
    int64_t nip = remained / kT;

    int64_t d = d_out * dT - pT + i;
    int64_t h = h_out * dH - pH + j;
    int64_t w = w_out * dW - pW + k;

    finput_data[idx] = (h >= 0 && w >= 0 && d >= 0 && h < inputHeight && w < inputWidth && d < inputDepth) ?
      input_data[nip*inputDepth*inputWidth*inputHeight+ d*inputHeight*inputWidth + h*inputWidth + w] : 0;
  }

  However, there are 6 quotient and 6 module operations which are very time-consuming. So we choose relatively more
  complex but more efficient pattern.
*/
/* vol2col: expands `input` into the column buffer `finput` so convolution
 * becomes a single matrix multiply. Same per-thread segmentation scheme as
 * unfolded_acc_vol: decode the 7 coordinates once per segment, then advance
 * them incrementally instead of doing divisions per element. */
static void THNN_(unfolded_copy_vol)(
          THTensor *finput,
          THTensor *input,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH,
          int64_t nInputPlane,
          int64_t inputDepth,
          int64_t inputWidth,
          int64_t inputHeight,
          int64_t outputDepth,
          int64_t outputWidth,
          int64_t outputHeight)
{
  scalar_t *input_data = input->data<scalar_t>();
  scalar_t *finput_data = finput->data<scalar_t>();

#ifdef _OPENMP
  /* Avoid nested parallelism (e.g. when called from the batched loop). */
  int inOmp = omp_in_parallel();
  #pragma omp parallel if (!inOmp) firstprivate(finput_data, input_data, outputWidth, outputHeight, outputDepth, kW, kH, kT, dW, dH, dT, pW, pH, pT, nInputPlane, inputHeight, inputWidth, inputDepth)
  {
    size_t num_threads = omp_get_num_threads();
    size_t tid = omp_get_thread_num();
    int64_t n = nInputPlane*kT*kH*kW*outputDepth*outputWidth*outputHeight;
    int64_t seg_len_tmp = n / num_threads;
    int64_t line_index_offset = tid * seg_len_tmp;
    int64_t line_seg_len = (tid == num_threads - 1)? (n-line_index_offset) : seg_len_tmp;

    /* Decode the segment start into (w_out, h_out, d_out, k, j, i, nip):
     * output position, kernel offset, and input plane. */
    int64_t w_out = line_index_offset % outputWidth;
    int64_t remained = line_index_offset / outputWidth;
    int64_t h_out = remained % outputHeight;
    remained /= outputHeight;
    int64_t d_out = remained % outputDepth;
    remained /= outputDepth;
    int k = remained % kW;
    remained /= kW;
    int j = remained % kH;
    remained /= kH;
    int i = remained % kT;
    int64_t nip = remained / kT;
#else
    int64_t line_seg_len = nInputPlane*kT*kH*kW*outputDepth*outputWidth*outputHeight;
    int64_t line_index_offset = 0;
    int64_t w_out = 0;
    int64_t h_out = 0;
    int64_t d_out = 0;
    int i = 0;
    int j = 0;
    int k = 0;
    int64_t nip = 0;
#endif

    int64_t count = 0;
    scalar_t* dst = finput_data + line_index_offset;
    int64_t inputHW = inputHeight*inputWidth;
    int64_t inputDHW = inputHW*inputDepth;

    while (count < line_seg_len) {
      /* Source coordinate in the (conceptually) padded input. */
      int64_t w = w_out * dW - pW + k;
      int64_t h = h_out * dH - pH + j;
      int64_t d = d_out * dT - pT + i;

      /* Out-of-bounds (padding) reads become 0. */
      *dst = (h >= 0 && w >= 0 && d >= 0 && h < inputHeight && w < inputWidth && d < inputDepth) ?
        input_data[nip*inputDHW+ d*inputHW + h*inputWidth + w] : 0;

      count++;
      /* Odometer-advance the 7 nested coordinates. */
      if (count < line_seg_len) {
        dst++;
        w_out++;
        if (w_out == outputWidth) {
          w_out = 0;
          h_out++;
          if (h_out == outputHeight) {
            h_out = 0;
            d_out++;
            if (d_out == outputDepth) {
              d_out = 0;
              k++;
              if(k == kW) {
                k = 0;
                j++;
                if(j == kH) {
                  j = 0;
                  i++;
                  if(i == kT) {
                    i = 0;
                    nip++;
                  }
                }
              }
            }
          }
        }
      }
    }
#ifdef _OPENMP
  }
#endif
}

/* Forward pass for one sample: unfold input into `finput`, seed the output
 * with the bias (or zeros), then output2d += weight * finput via addmm. */
static void THNN_(VolumetricConvolutionMM_updateOutput_frame)(
          THTensor *input,
          THTensor *output,
          THTensor *weight,
          THTensor *bias,
          THTensor *finput,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH,
          int64_t nInputPlane,
          int64_t inputDepth,
          int64_t inputWidth,
          int64_t inputHeight,
          int64_t nOutputPlane,
          int64_t outputDepth,
          int64_t outputWidth,
          int64_t outputHeight)
{
  int64_t i;
  THTensor *output2d;

  THNN_(unfolded_copy_vol)(
    finput, input,
    kT, kW, kH,
    dT, dW, dH,
    pT, pW, pH,
    nInputPlane,
    inputDepth, inputWidth, inputHeight,
    outputDepth, outputWidth, outputHeight
  );

  /* 2D view of output sharing storage: (nOutputPlane x D*H*W). */
  output2d = THTensor_(newWithStorage2d)(
    THTensor_getStoragePtr(output), output->storage_offset(), nOutputPlane, -1,
    outputDepth*outputHeight*outputWidth, -1
  );

  if (bias) {
    /* Broadcast each bias value across its whole output plane. */
    for (i = 0; i < nOutputPlane; i++)
    {
      THVector_(fill)(
        THStorage_(data)(THTensor_getStoragePtr(output))+output->storage_offset()+output->stride(0)*i,
        THTensor_(get1d)(bias, i),
        outputDepth*outputHeight*outputWidth
      );
    }
  } else {
    THTensor_(zero)(output);
  }

  THTensor_(addmm)(output2d, 1, output2d, 1, weight, finput);

  c10::raw::intrusive_ptr::decref(output2d);
}

/* Public forward entry point. Handles both 4D (single sample) and 5D
 * (batched) input; the batch case runs one frame per iteration, parallel
 * when the batch exceeds CONV3D_OMP_THRESHOLD. fgradInput is unused here. */
void THNN_(VolumetricConvolutionMM_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          THTensor *weight,
          THTensor *bias,
          THTensor *finput,
          THTensor *fgradInput, // unused
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH)
{
  int dimf = 0;
  int dimt = 1;
  int dimh = 2;
  int dimw = 3;

  int64_t nInputPlane;
  int64_t inputDepth;
  int64_t inputHeight;
  int64_t inputWidth;
  int64_t nOutputPlane;
  int64_t outputDepth;
  int64_t outputHeight;
  int64_t outputWidth;

  THNN_(VolumetricConvolutionMM_shapeCheck)(
        state, input, NULL, weight, bias,
        kT, kW, kH, dT, dW, dH, pT, pW, pH, 0);

  input = THTensor_(newContiguous)(input);

  if (input->dim() == 5)
  {
    dimf++;
    dimt++;
    dimh++;
    dimw++;
  }

  nInputPlane = input->size(dimf);
  inputDepth = input->size(dimt);
  inputHeight = input->size(dimh);
  inputWidth = input->size(dimw);
  nOutputPlane = weight->size(0);
  /* Plain truncating division is fine here: shapeCheck already rejected
   * negative/too-small results. */
  outputDepth  = (inputDepth + 2*pT - kT) / dT + 1;
  outputHeight = (inputHeight + 2*pH - kH) / dH + 1;
  outputWidth  = (inputWidth + 2*pW - kW) / dW + 1;

  weight = THNN_(newViewWeight)(weight);

  if (input->dim() == 4)
  {
    THTensor_(resize2d)(finput, kT*kW*kH*nInputPlane, outputDepth*outputHeight*outputWidth);
    THTensor_(resize4d)(output, nOutputPlane, outputDepth, outputHeight, outputWidth);

    THNN_(VolumetricConvolutionMM_updateOutput_frame)(
      input, output, weight, bias, finput,
      kT, kW, kH,
      dT, dW, dH,
      pT, pW, pH,
      nInputPlane, inputDepth, inputWidth, inputHeight,
      nOutputPlane, outputDepth, outputWidth, outputHeight
    );
  }
  else
  {
    int64_t T = input->size(0);
    int64_t t;

    THTensor_(resize3d)(finput, T, kT*kW*kH*nInputPlane, outputDepth*outputHeight*outputWidth);
    THTensor_(resize5d)(output, T, nOutputPlane, outputDepth, outputHeight, outputWidth);
#ifdef _OPENMP
    /* Each iteration works on disjoint slices (per-t newSelect views). */
    #pragma omp parallel for if(T > CONV3D_OMP_THRESHOLD) private(t)
#endif
    for (t = 0; t < T; t++)
    {
      THTensor *input_t = THTensor_(newSelect)(input, 0, t);
      THTensor *output_t = THTensor_(newSelect)(output, 0, t);
      THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);

      THNN_(VolumetricConvolutionMM_updateOutput_frame)(
        input_t, output_t, weight, bias, finput_t,
        kT, kW, kH,
        dT, dW, dH,
        pT, pW, pH,
        nInputPlane, inputDepth, inputWidth, inputHeight,
        nOutputPlane, outputDepth, outputWidth, outputHeight
      );

      c10::raw::intrusive_ptr::decref(input_t);
      c10::raw::intrusive_ptr::decref(output_t);
      c10::raw::intrusive_ptr::decref(finput_t);
    }
  }

  c10::raw::intrusive_ptr::decref(input);
  c10::raw::intrusive_ptr::decref(weight);
}

/* Backward-input pass for one sample: fgradInput = weight^T * gradOutput2d,
 * then fold the columns back into gradInput via unfolded_acc_vol. */
static void THNN_(VolumetricConvolutionMM_updateGradInput_frame)(
          THTensor *gradInput,
          THTensor *gradOutput,
          THTensor *weight,
          THTensor *fgradInput,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH)
{
  /* 2D view of gradOutput sharing storage: (nOutputPlane x D*H*W). */
  THTensor *gradOutput2d = THTensor_(newWithStorage2d)(
    THTensor_getStoragePtr(gradOutput), gradOutput->storage_offset(),
    gradOutput->size(0), -1,
    gradOutput->size(1)*gradOutput->size(2)*gradOutput->size(3), -1
  );

  /* beta = 0: overwrite fgradInput with weight * gradOutput2d
   * (weight here is the transposed view built by the caller). */
  THTensor_(addmm)(fgradInput, 0, fgradInput, 1, weight, gradOutput2d);
  c10::raw::intrusive_ptr::decref(gradOutput2d);

  THTensor_(zero)(gradInput);

  /* NB: width/height arguments are deliberately swapped relative to the
   * size indices (size(3) is width, size(2) is height). */
  THNN_(unfolded_acc_vol)(
    fgradInput, gradInput,
    kT, kW, kH,
    dT, dW, dH,
    pT, pW, pH,
    gradInput->size(0), gradInput->size(1), gradInput->size(3), gradInput->size(2),
    gradOutput->size(1), gradOutput->size(3), gradOutput->size(2)
  );
}

/* Public backward-input entry point: validates shapes, builds the
 * transposed weight view once, then processes sample(s), in parallel for
 * large batches (per-t slices are disjoint). */
void THNN_(VolumetricConvolutionMM_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          THTensor *weight,
          THTensor *finput,
          THTensor *fgradInput,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH)
{
  THNN_(VolumetricConvolutionMM_shapeCheck)(
        state, input, gradOutput, weight, NULL,
        kT, kW, kH, dT, dW, dH, pT, pW, pH, 0);

  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);

  weight = THNN_(newViewWeight)(weight);

  THTensor_(resizeAs)(gradInput, input);
  THTensor_(resizeAs)(fgradInput, finput);

  // depending on the BLAS library, fgradInput (result tensor) might
  // be left uninitialized on zero alpha, which might lead to weird behavior
  // hence, to be safe, zero it
  THTensor_(zero)(fgradInput);

  THTensor *tweight = THTensor_(new)();
  THTensor_(transpose)(tweight, weight, 0, 1);

  if (input->dim() == 4)
  {
    THNN_(VolumetricConvolutionMM_updateGradInput_frame)(
      gradInput, gradOutput, tweight, fgradInput,
      kT, kW, kH,
      dT, dW, dH,
      pT, pW, pH
    );
  }
  else
  {
    int64_t T = input->size(0);
    int64_t t;

#ifdef _OPENMP
    #pragma omp parallel for if(T > CONV3D_OMP_THRESHOLD) private(t)
#endif
    for (t = 0; t < T; t++)
    {
      THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t);
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);

      THNN_(VolumetricConvolutionMM_updateGradInput_frame)(
        gradInput_t, gradOutput_t, tweight, fgradInput_t,
        kT, kW, kH,
        dT, dW, dH,
        pT, pW, pH
      );

      c10::raw::intrusive_ptr::decref(gradInput_t);
      c10::raw::intrusive_ptr::decref(gradOutput_t);
      c10::raw::intrusive_ptr::decref(fgradInput_t);
    }
  }

  c10::raw::intrusive_ptr::decref(tweight);
  c10::raw::intrusive_ptr::decref(input);
  c10::raw::intrusive_ptr::decref(gradOutput);
  c10::raw::intrusive_ptr::decref(weight);
}

/* Accumulates parameter gradients for one sample:
 * gradWeight += scale * gradOutput2d * finput^T, and gradBias[i] +=
 * scale * sum over the i-th gradOutput plane. */
static void THNN_(VolumetricConvolutionMM_accGradParameters_frame)(
          THTensor *gradOutput,
          THTensor *gradWeight,
          THTensor *gradBias,
          THTensor *finput,  // can be NULL if gradWeight = NULL
          scalar_t scale)
{
  int64_t i;
  THTensor *gradOutput2d = THTensor_(newWithStorage2d)(
    THTensor_getStoragePtr(gradOutput), gradOutput->storage_offset(),
    gradOutput->size(0), -1,
    gradOutput->size(1)*gradOutput->size(2)*gradOutput->size(3), -1
  );

  if (gradWeight){
    THTensor *tfinput = THTensor_(new)();
    THTensor_(transpose)(tfinput, finput, 0, 1);
    THTensor_(addmm)(gradWeight, 1, gradWeight, scale, gradOutput2d, tfinput);
    c10::raw::intrusive_ptr::decref(tfinput);
  }

  if (gradBias) {
    /* Bias gradient: sum each output plane of gradOutput2d. */
    for (i = 0; i < THTensor_sizeLegacyNoScalars(gradBias, 0); i++)
    {
      int64_t k;
      scalar_t sum = 0;
      scalar_t *data = THStorage_(data)(THTensor_getStoragePtr(gradOutput2d)) + gradOutput2d->storage_offset() + i*gradOutput2d->stride(0);
      for (k = 0; k < gradOutput2d->size(1); k++)
        sum += data[k];

      (THStorage_(data)(THTensor_getStoragePtr(gradBias)) + gradBias->storage_offset())[i] += scale * sum;
    }
  }

  c10::raw::intrusive_ptr::decref(gradOutput2d);
}

/* Public parameter-gradient entry point (weight_nullable = 1: gradWeight
 * may be NULL when only the bias gradient is wanted). */
void THNN_(VolumetricConvolutionMM_accGradParameters)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradWeight,
          THTensor *gradBias,
          THTensor *finput,
          THTensor *fgradInput,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH,
          accreal scale_)
{
  scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
  THNN_(VolumetricConvolutionMM_shapeCheck)(
        state, input, gradOutput, gradWeight, gradBias,
        kT, kW, kH, dT, dW, dH, pT, pW, pH, 1);

  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);

  if (gradWeight) {
    gradWeight = THNN_(newViewWeight)(gradWeight);
  }

  if (input->dim() == 4)   // non-batch mode
  {
    THNN_(VolumetricConvolutionMM_accGradParameters_frame)(gradOutput, gradWeight, gradBias, finput, scale);
  }
  else  // batch mode
  {
    int64_t T = input->size(0);
    int64_t t;

#ifdef _OPENMP
    /* NOTE(review): unlike the other batched loops, every iteration here
     * accumulates into the SAME gradWeight/gradBias tensors, so this
     * parallel-for looks like a data race for T > CONV3D_OMP_THRESHOLD —
     * confirm against upstream history before relying on it. */
    #pragma omp parallel for if(T > CONV3D_OMP_THRESHOLD) private(t)
#endif
    for (t = 0; t < T; t++)
    {
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *finput_t = NULL;
      if (gradWeight) {
        finput_t = THTensor_(newSelect)(finput, 0, t);
      }

      THNN_(VolumetricConvolutionMM_accGradParameters_frame)(gradOutput_t, gradWeight, gradBias, finput_t, scale);

      c10::raw::intrusive_ptr::decref(gradOutput_t);
      if (gradWeight) {
        c10::raw::intrusive_ptr::decref(finput_t);
      }
    }
  }

  c10::raw::intrusive_ptr::decref(input);
  c10::raw::intrusive_ptr::decref(gradOutput);

  if (gradWeight) {
    c10::raw::intrusive_ptr::decref(gradWeight);
  }
}

#endif
2_helloworld2.c
#include <stdio.h>
#include <omp.h>

/* Prints one greeting from the initial thread, then a pair of messages
 * from every thread of an OpenMP parallel region. */
int main(int argc, char** argv) {
    (void)argc;  /* command-line arguments are unused */
    (void)argv;
    printf("Hello World :)\n");
#pragma omp parallel
    {
        int tid = omp_get_thread_num();  /* query once, reuse for both lines */
        printf("Hello World from thread %d\n", tid);
        printf("E agora??? %d\n", tid);
    }
    return 0;
}
fx.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF X X % % F X X % % FFF X % % F X X % % F X X % % % % % % MagickCore Image Special Effects Methods % % % % Software Design % % snibgo (Alan Gibson) % % January 2022 % % % % % % % % Copyright @ 2022 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/fx.h" #include "MagickCore/fx-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" #define MaxTokenLen 100 #define RpnInit 100 #define TableExtend 0.1 #define InitNumOprStack 50 #define MinValStackSize 100 #define 
InitNumUserSymbols 50

/* All expression arithmetic is done in long double for extra precision. */
typedef long double fxFltType;

/* Operator opcodes. Order matters: later enums (functions, attributes,
 * symbols, controls) are numbered starting from oNull+1 so every token
 * kind shares one flat numbering space. */
typedef enum {
  oAddEq, oSubtractEq, oMultiplyEq, oDivideEq, oPlusPlus, oSubSub,
  oAdd, oSubtract, oMultiply, oDivide, oModulus,
  oUnaryPlus, oUnaryMinus,
  oLshift, oRshift,
  oEq, oNotEq, oLtEq, oGtEq, oLt, oGt,
  oLogAnd, oLogOr, oLogNot,
  oBitAnd, oBitOr, oBitNot,
  oPow,
  oQuery, oColon,
  oOpenParen, oCloseParen, oOpenBracket, oCloseBracket, oOpenBrace, oCloseBrace,
  oAssign,
  oNull
} OperatorE;

typedef struct {
  OperatorE op;
  const char * str;
  int precedence; /* Higher number is higher precedence */
  int nArgs;
} OperatorT;

/* Lexer/parser table. NOTE: two-character operators precede their
 * one-character prefixes ("+=" before "+") so longest-match scanning works. */
static const OperatorT Operators[] = {
  {oAddEq,        "+=",    12, 1},
  {oSubtractEq,   "-=",    12, 1},
  {oMultiplyEq,   "*=",    13, 1},
  {oDivideEq,     "/=",    13, 1},
  {oPlusPlus,     "++",    12, 0},
  {oSubSub,       "--",    12, 0},
  {oAdd,          "+",     12, 2},
  {oSubtract,     "-",     12, 2},
  {oMultiply,     "*",     13, 2},
  {oDivide,       "/",     13, 2},
  {oModulus,      "%",     13, 2},
  {oUnaryPlus,    "+",     14, 1},
  {oUnaryMinus,   "-",     14, 1},
  {oLshift,       "<<",    11, 2},
  {oRshift,       ">>",    11, 2},
  {oEq,           "==",     9, 2},
  {oNotEq,        "!=",     9, 2},
  {oLtEq,         "<=",    10, 2},
  {oGtEq,         ">=",    10, 2},
  {oLt,           "<",     10, 2},
  {oGt,           ">",     10, 2},
  {oLogAnd,       "&&",     6, 2},
  {oLogOr,        "||",     5, 2},
  {oLogNot,       "!",     16, 1},
  {oBitAnd,       "&",      8, 2},
  {oBitOr,        "|",      7, 2},
  {oBitNot,       "~",     16, 1},
  {oPow,          "^",     15, 2},
  {oQuery,        "?",      4, 1},
  {oColon,        ":",      4, 1},
  {oOpenParen,    "(",      0, 0},
  {oCloseParen,   ")",      0, 0},
  {oOpenBracket,  "[",      0, 0},
  {oCloseBracket, "]",      0, 0},
  {oOpenBrace,    "{",      0, 0},
  {oCloseBrace,   "}",      0, 0},
  {oAssign,       "=",      3, 1},
  {oNull,         "onull", 17, 0}
} ;

/* Named numeric constants recognized in expressions. */
typedef enum {
  cEpsilon, cE, cOpaque, cPhi, cPi, cQuantumRange, cQuantumScale,
  cTransparent, cMaxRgb,
  cNull
} ConstantE;

typedef struct {
  ConstantE cons;
  fxFltType val;
  const char * str;
} ConstantT;

static const ConstantT Constants[] = {
  {cEpsilon,      MagickEpsilon,         "epsilon"},
  {cE,            2.7182818284590452354, "e"},
  {cOpaque,       1.0,                   "opaque"},
  {cPhi,          MagickPHI,             "phi"},
  {cPi,           MagickPI,              "pi"},
  {cQuantumRange, QuantumRange,          "quantumrange"},
  {cQuantumScale, QuantumScale,          "quantumscale"},
  {cTransparent,  0.0,                   "transparent"},
  {cMaxRgb,       QuantumRange,          "MaxRGB"},  /* legacy alias */
  {cNull,         0.0,                   "cnull"}
};

/* Function opcodes continue the flat numbering after operators. */
#define FirstFunc ((FunctionE) (oNull+1))

typedef enum {
  fAbs = oNull+1,
#if defined(MAGICKCORE_HAVE_ACOSH)
  fAcosh,
#endif
  fAcos,
#if defined(MAGICKCORE_HAVE_J1)
  fAiry,
#endif
  fAlt,
#if defined(MAGICKCORE_HAVE_ASINH)
  fAsinh,
#endif
  fAsin,
#if defined(MAGICKCORE_HAVE_ATANH)
  fAtanh,
#endif
  fAtan2, fAtan, fCeil, fChannel, fClamp, fCosh, fCos, fDebug, fDrc,
#if defined(MAGICKCORE_HAVE_ERF)
  fErf,
#endif
  fExp, fFloor, fGauss, fGcd, fHypot, fInt, fIsnan,
#if defined(MAGICKCORE_HAVE_J0)
  fJ0,
#endif
#if defined(MAGICKCORE_HAVE_J1)
  fJ1,
#endif
#if defined(MAGICKCORE_HAVE_J1)
  fJinc,
#endif
  fLn, fLogtwo, fLog, fMax, fMin, fMod, fNot, fPow, fRand, fRound,
  fSign, fSinc, fSinh, fSin, fSqrt, fSquish, fTanh, fTan, fTrunc,
  fDo, fFor, fIf, fWhile, fU, fU0, fUP, fS, fV, fP, fSP, fVP,
  fNull
} FunctionE;

typedef struct {
  FunctionE func;
  const char * str;
  int nArgs;
} FunctionT;

/* Name -> function mapping; the #if guards must mirror the enum above
 * so table entries stay aligned with their opcodes. */
static const FunctionT Functions[] = {
  {fAbs,     "abs"   , 1},
#if defined(MAGICKCORE_HAVE_ACOSH)
  {fAcosh,   "acosh" , 1},
#endif
  {fAcos,    "acos"  , 1},
#if defined(MAGICKCORE_HAVE_J1)
  {fAiry,    "airy"  , 1},
#endif
  {fAlt,     "alt"   , 1},
#if defined(MAGICKCORE_HAVE_ASINH)
  {fAsinh,   "asinh" , 1},
#endif
  {fAsin,    "asin"  , 1},
#if defined(MAGICKCORE_HAVE_ATANH)
  {fAtanh,   "atanh" , 1},
#endif
  {fAtan2,   "atan2" , 2},
  {fAtan,    "atan"  , 1},
  {fCeil,    "ceil"  , 1},
  {fChannel, "channel" , 5},
  {fClamp,   "clamp" , 1},
  {fCosh,    "cosh"  , 1},
  {fCos,     "cos"   , 1},
  {fDebug,   "debug" , 1},
  {fDrc,     "drc"   , 2},
#if defined(MAGICKCORE_HAVE_ERF)
  {fErf,     "erf"   , 1},
#endif
  {fExp,     "exp"   , 1},
  {fFloor,   "floor" , 1},
  {fGauss,   "gauss" , 2},
  {fGcd,     "gcd"   , 2},
  {fHypot,   "hypot" , 2},
  {fInt,     "int"   , 1},
  {fIsnan,   "isnan" , 1},
#if defined(MAGICKCORE_HAVE_J0)
  {fJ0,      "j0"    , 1},
#endif
#if defined(MAGICKCORE_HAVE_J1)
  {fJ1,      "j1"    , 1},
#endif
#if defined(MAGICKCORE_HAVE_J1)
  {fJinc,    "jinc"  , 1},
#endif
  {fLn,      "ln"    , 1},
  {fLogtwo,  "logtwo", 1},
  {fLog,     "log"   , 1},
  {fMax,     "max"   , 2},
  {fMin,     "min"   , 2},
  {fMod,     "mod"   , 2},
  {fNot,     "not"   , 1},
  {fPow,     "pow"   , 2},
  {fRand,    "rand"  , 0},
  {fRound,   "round" , 1},
  {fSign,    "sign"  , 1},
  {fSinc,    "sinc"  , 1},
  {fSinh,    "sinh"  , 1},
  {fSin,     "sin"   , 1},
  {fSqrt,    "sqrt"  , 1},
  {fSquish,  "squish", 1},
  {fTanh,    "tanh"  , 1},
  {fTan,     "tan"   , 1},
  {fTrunc,   "trunc" , 1},
  {fDo,      "do",     2},
  {fFor,     "for",    3},
  {fIf,      "if",     3},
  {fWhile,   "while",  2},
  {fU,       "u",      1},
  {fU0,      "u0",     0},
  {fUP,      "up",     3},
  {fS,       "s",      0},
  {fV,       "v",      0},
  {fP,       "p",      2},
  {fSP,      "sp",     2},
  {fVP,      "vp",     2},
  {fNull,    "fnull" , 0}
};

/* Image attribute opcodes continue after functions. */
#define FirstImgAttr ((ImgAttrE) (fNull+1))

typedef enum {
  aDepth = fNull+1,
  aExtent, aKurtosis, aMaxima, aMean, aMedian, aMinima, aPage,
  aPageX, aPageY, aPageWid, aPageHt, aPrintsize, aPrintsizeX, aPrintsizeY,
  aQuality, aRes, aResX, aResY, aSkewness, aStdDev,
  aH, aN, aT, aW, aZ,
  aNull
} ImgAttrE;

typedef struct {
  ImgAttrE attr;
  const char * str;
  int NeedStats;  /* non-zero: attribute needs channel statistics computed */
} ImgAttrT;

static const ImgAttrT ImgAttrs[] = {
  {aDepth,      "depth",              1},
  {aExtent,     "extent",             0},
  {aKurtosis,   "kurtosis",           1},
  {aMaxima,     "maxima",             1},
  {aMean,       "mean",               1},
  {aMedian,     "median",             1},
  {aMinima,     "minima",             1},
  {aPage,       "page",               0},
  {aPageX,      "page.x",             0},
  {aPageY,      "page.y",             0},
  {aPageWid,    "page.width",         0},
  {aPageHt,     "page.height",        0},
  {aPrintsize,  "printsize",          0},
  {aPrintsizeX, "printsize.x",        0},
  {aPrintsizeY, "printsize.y",        0},
  {aQuality,    "quality",            0},
  {aRes,        "resolution",         0},
  {aResX,       "resolution.x",       0},
  {aResY,       "resolution.y",       0},
  {aSkewness,   "skewness",           1},
  {aStdDev,     "standard_deviation", 1},
  {aH,          "h",                  0},
  {aN,          "n",                  0},
  {aT,          "t",                  0},
  {aW,          "w",                  0},
  {aZ,          "z",                  0},
  {aNull,       "anull",              0}
};

/* Per-pixel symbol opcodes continue after image attributes. */
#define FirstSym ((SymbolE) (aNull+1))

typedef enum {
  sHue = aNull+1,
  sIntensity, sLightness, sLuma, sLuminance, sSaturation,
  sA, sB, sC, sG, sI, sJ, sK, sM, sO, sR, sY,
  sNull
} SymbolE;

typedef struct {
  SymbolE sym;
  const char * str;
} SymbolT;

static const SymbolT Symbols[] = {
  {sHue,        "hue"},
  {sIntensity,  "intensity"},
  {sLightness,  "lightness"},
  {sLuma,       "luma"},
  {sLuminance,  "luminance"},
  {sSaturation, "saturation"},
  {sA,          "a"},
  {sB,          "b"},
  {sC,          "c"},
  {sG,          "g"},
  {sI,          "i"},
  {sJ,          "j"},
  {sK,          "k"},
  {sM,          "m"},
  {sO,          "o"},
  {sR,          "r"},
  {sY,          "y"},
  {sNull,       "snull"}
};

/*
   There is no way to access new value of pixels. This might be a future enhancement, eg "q".
   fP, oU and oV can have channel qualifier such as "u.r".
   For meta channels, we might also allow numbered channels eg "u.2" or "u.16". ... or have extra argument to p[].
*/

#define FirstCont (sNull+1)

/* Run-time controls are in the RPN, not explicitly in the input string. */
typedef enum {
  rGoto = FirstCont,
  rIfZeroGoto, rIfNotZeroGoto,
  rCopyFrom, rCopyTo,
  rZerStk,
  rNull
} ControlE;

typedef struct {
  ControlE cont;
  const char * str;
  int nArgs;
} ControlT;

static const ControlT Controls[] = {
  {rGoto,          "goto",          0},
  {rIfZeroGoto,    "ifzerogoto",    1},
  {rIfNotZeroGoto, "ifnotzerogoto", 1},
  {rCopyFrom,      "copyfrom",      0},
  {rCopyTo,        "copyto",        1},
  {rZerStk,        "zerstk",        0},
  {rNull,          "rnull",         0}
};

/* Sentinel for an unresolved jump target. */
#define NULL_ADDRESS -2

/* Bookkeeping for "?:" — RPN addresses of the two jump points. */
typedef struct {
  int addrQuery;
  int addrColon;
} TernaryT;

typedef struct {
  const char * str;
  PixelChannel pixChan;
} ChannelT;

/* Pseudo-channel codes (negative, so they never collide with real
 * PixelChannel values). */
#define NO_CHAN_QUAL      ((PixelChannel) (-1))
#define THIS_CHANNEL      ((PixelChannel) (-2))
#define HUE_CHANNEL       ((PixelChannel) (-3))
#define SAT_CHANNEL       ((PixelChannel) (-4))
#define LIGHT_CHANNEL     ((PixelChannel) (-5))
#define INTENSITY_CHANNEL ((PixelChannel) (-6))

/* Channel-qualifier names (eg "u.r"); "" terminates lookup. */
static const ChannelT Channels[] = {
  {"r",          RedPixelChannel},
  {"g",          GreenPixelChannel},
  {"b",          BluePixelChannel},
  {"c",          CyanPixelChannel},
  {"m",          MagentaPixelChannel},
  {"y",          YellowPixelChannel},
  {"k",          BlackPixelChannel},
  {"a",          AlphaPixelChannel},
  {"o",          AlphaPixelChannel},
  {"hue",        HUE_CHANNEL},
  {"saturation", SAT_CHANNEL},
  {"lightness",  LIGHT_CHANNEL},
  {"intensity",  INTENSITY_CHANNEL},
  {"all",        CompositePixelChannel},
  {"this",       THIS_CHANNEL},
  {"",           NO_CHAN_QUAL}
};

/* The index into UserSymbols is also the index into run-time UserSymVals.
*/
/* A user symbol is identified by the span of expression text that named it. */
typedef struct {
  char * pex;
  size_t len;
} UserSymbolT;

typedef enum {
  etOperator,
  etConstant,
  etFunction,
  etImgAttr,
  etSymbol,
  etColourConstant,
  etControl
} ElementTypeE;

/* Display names for ElementTypeE, in the same order. */
static const char * sElementTypes[] = {
  "Operator",
  "Constant",
  "Function",
  "ImgAttr",
  "Symbol",
  "ColConst",
  "Control"
};

/* One compiled RPN element. */
typedef struct {
  ElementTypeE type;
  fxFltType val, val1, val2;  /* immediate value(s), eg colour components */
  int oprNum;                 /* opcode within the flat numbering space */
  int nArgs;
  MagickBooleanType IsRelative;
  MagickBooleanType DoPush;   /* whether result is pushed on the value stack */
  int EleNdx;
  int nDest;  /* Number of Elements that "goto" this element */
  PixelChannel ChannelQual;
  ImgAttrE ImgAttrQual;
  char * pExpStart;  /* span of source text, for diagnostics */
  int lenExp;
} ElementT;

/* How much of each image must be visited to gather statistics. */
typedef enum {
  rtUnknown,
  rtEntireImage,
  rtCornerOnly
} RunTypeE;

typedef struct {
  CacheView *View;
  /* Other per-image metadata could go here. */
} ImgT;

/* Per-thread runtime state (one per worker thread). */
typedef struct {
  RandomInfo * magick_restrict random_info;
  int numValStack;
  int usedValStack;
  fxFltType * ValStack;
  fxFltType * UserSymVals;
  Quantum * thisPixel;
} fxRtT;

/* Master compiler/runtime context for one fx expression. */
struct _FxInfo {
  Image * image;
  size_t ImgListLen;
  ssize_t ImgNum;
  MagickBooleanType NeedStats;
  MagickBooleanType GotStats;
  MagickBooleanType NeedHsl;
  MagickBooleanType DebugOpt;       /* Whether "-debug" option is in effect */
  MagickBooleanType ContainsDebug;  /* Whether expression contains "debug ()" function */
  char * expression;
  char * pex;                       /* current parse position in expression */
  char ShortExp[MagickPathExtent];  /* for reporting */
  int teDepth;
  char token[MagickPathExtent];
  size_t lenToken;
  int numElements;
  int usedElements;
  ElementT * Elements;  /* Elements is read-only at runtime. */
  int numUserSymbols;
  int usedUserSymbols;
  UserSymbolT * UserSymbols;
  int numOprStack;
  int usedOprStack;
  int maxUsedOprStack;
  OperatorE * OperatorStack;
  ChannelStatistics ** statistics;
  int precision;
  RunTypeE runType;
  RandomInfo **magick_restrict random_infos;
  ImgT * Imgs;
  Image ** Images;
  ExceptionInfo * exception;
  fxRtT * fxrts;  /* one per thread */
};

/* Forward declarations for recursion.
*/ static MagickBooleanType TranslateStatementList (FxInfo * pfx, const char * strLimit, char * chLimit); static MagickBooleanType TranslateExpression (FxInfo * pfx, const char * strLimit, char * chLimit, MagickBooleanType * needPopAll); static MagickBooleanType GetFunction (FxInfo * pfx, FunctionE fe); static MagickBooleanType InitFx (FxInfo * pfx, const Image * img, MagickBooleanType CalcAllStats, ExceptionInfo *exception) { ssize_t i=0; const Image * next; pfx->ImgListLen = GetImageListLength (img); pfx->ImgNum = GetImageIndexInList (img); pfx->image = (Image *)img; pfx->NeedStats = MagickFalse; pfx->GotStats = MagickFalse; pfx->NeedHsl = MagickFalse; pfx->DebugOpt = IsStringTrue (GetImageArtifact (img, "fx:debug")); pfx->statistics = NULL; pfx->Imgs = NULL; pfx->Images = NULL; pfx->exception = exception; pfx->precision = GetMagickPrecision (); pfx->random_infos = AcquireRandomInfoThreadSet (); pfx->ContainsDebug = MagickFalse; pfx->runType = (CalcAllStats) ? rtEntireImage : rtCornerOnly; pfx->Imgs = (ImgT *)AcquireQuantumMemory (pfx->ImgListLen, sizeof (ImgT)); if (!pfx->Imgs) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), ResourceLimitFatalError, "Imgs", "%lu", pfx->ImgListLen); return MagickFalse; } next = GetFirstImageInList (img); for ( ; next != (Image *) NULL; next=next->next) { ImgT * pimg = &pfx->Imgs[i]; pimg->View = AcquireVirtualCacheView (next, pfx->exception); if (!pimg->View) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), ResourceLimitFatalError, "View", "[%li]", i); /* dealloc any done so far, and Imgs */ for ( ; i > 0; i--) { pimg = &pfx->Imgs[i-1]; pimg->View = DestroyCacheView (pimg->View); } pfx->Imgs=(ImgT *) RelinquishMagickMemory (pfx->Imgs); return MagickFalse; } i++; } pfx->Images = ImageListToArray (img, pfx->exception); return MagickTrue; } static MagickBooleanType DeInitFx (FxInfo * pfx) { ssize_t i; if (pfx->Images) pfx->Images = (Image**) RelinquishMagickMemory (pfx->Images); if (pfx->Imgs) { 
for (i = (ssize_t)GetImageListLength(pfx->image); i > 0; i--) { ImgT * pimg = &pfx->Imgs[i-1]; pimg->View = DestroyCacheView (pimg->View); } pfx->Imgs=(ImgT *) RelinquishMagickMemory (pfx->Imgs); } pfx->random_infos = DestroyRandomInfoThreadSet (pfx->random_infos); if (pfx->statistics) { for (i = (ssize_t)GetImageListLength(pfx->image); i > 0; i--) { pfx->statistics[i-1]=(ChannelStatistics *) RelinquishMagickMemory (pfx->statistics[i-1]); } pfx->statistics = (ChannelStatistics**) RelinquishMagickMemory(pfx->statistics); } return MagickTrue; } static ElementTypeE TypeOfOpr (int op) { if (op < oNull) return etOperator; if (op == oNull) return etConstant; if (op <= fNull) return etFunction; if (op <= aNull) return etImgAttr; if (op <= sNull) return etSymbol; if (op <= rNull) return etControl; return (ElementTypeE) 0; } static char * SetPtrShortExp (FxInfo * pfx, char * pExp, size_t len) { #define MaxLen 20 size_t slen; char * p; *pfx->ShortExp = '\0'; if (pExp && len) { slen = CopyMagickString (pfx->ShortExp, pExp, len); if (slen > MaxLen) { (void) CopyMagickString (pfx->ShortExp+MaxLen, "...", 4); } p = strchr (pfx->ShortExp, '\n'); if (p) (void) CopyMagickString (p, "...", 4); p = strchr (pfx->ShortExp, '\r'); if (p) (void) CopyMagickString (p, "...", 4); } return pfx->ShortExp; } static char * SetShortExp (FxInfo * pfx) { return SetPtrShortExp (pfx, pfx->pex, MaxTokenLen-1); } static int FindUserSymbol (FxInfo * pfx, char * name) /* returns index into pfx->UserSymbols, and thus into pfxrt->UserSymVals, or NULL_ADDRESS if not found. 
*/ { int i; size_t lenName; lenName = strlen (name); for (i=0; i < pfx->usedUserSymbols; i++) { UserSymbolT *pus = &pfx->UserSymbols[i]; if (lenName == pus->len && LocaleNCompare (name, pus->pex, lenName)==0) break; } if (i == pfx->usedUserSymbols) return NULL_ADDRESS; return i; } static MagickBooleanType ExtendUserSymbols (FxInfo * pfx) { pfx->numUserSymbols = (int) ceil (pfx->numUserSymbols * (1 + TableExtend)); pfx->UserSymbols = (UserSymbolT*) ResizeMagickMemory (pfx->UserSymbols, pfx->numUserSymbols * sizeof(UserSymbolT)); if (!pfx->UserSymbols) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), ResourceLimitFatalError, "UserSymbols", "%i", pfx->numUserSymbols); return MagickFalse; } return MagickTrue; } static int AddUserSymbol (FxInfo * pfx, char * pex, size_t len) { UserSymbolT *pus; if (++pfx->usedUserSymbols >= pfx->numUserSymbols) { if (!ExtendUserSymbols (pfx)) return -1; } pus = &pfx->UserSymbols[pfx->usedUserSymbols-1]; pus->pex = pex; pus->len = len; return pfx->usedUserSymbols-1; } static void DumpTables (FILE * fh) { int i; for (i=0; i <= rNull; i++) { const char * str = ""; if ( i < oNull) str = Operators[i].str; if (i >= FirstFunc && i < fNull) str = Functions[i-FirstFunc].str; if (i >= FirstImgAttr && i < aNull) str = ImgAttrs[i-FirstImgAttr].str; if (i >= FirstSym && i < sNull) str = Symbols[i-FirstSym].str; if (i >= FirstCont && i < rNull) str = Controls[i-FirstCont].str; if (i==0 ) fprintf (stderr, "Operators:\n "); else if (i==oNull) fprintf (stderr, "\nFunctions:\n "); else if (i==fNull) fprintf (stderr, "\nImage attributes:\n "); else if (i==aNull) fprintf (stderr, "\nSymbols:\n "); else if (i==sNull) fprintf (stderr, "\nControls:\n "); fprintf (fh, " %s", str); } fprintf (fh, "\n"); } static char * NameOfUserSym (FxInfo * pfx, int ndx, char * buf) { UserSymbolT * pus; assert (ndx >= 0 && ndx < pfx->usedUserSymbols); pus = &pfx->UserSymbols[ndx]; (void) CopyMagickString (buf, pus->pex, pus->len+1); return buf; } static void 
DumpUserSymbols (FxInfo * pfx, FILE * fh) { char UserSym[MagickPathExtent]; int i; fprintf (fh, "UserSymbols (%i)\n", pfx->usedUserSymbols); for (i=0; i < pfx->usedUserSymbols; i++) { fprintf (fh, " %i: '%s'\n", i, NameOfUserSym (pfx, i, UserSym)); } } static MagickBooleanType BuildRPN (FxInfo * pfx) { pfx->numUserSymbols = InitNumUserSymbols; pfx->usedUserSymbols = 0; pfx->UserSymbols = (UserSymbolT*) AcquireMagickMemory (pfx->numUserSymbols * sizeof(UserSymbolT)); if (!pfx->UserSymbols) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), ResourceLimitFatalError, "UserSymbols", "%i", pfx->numUserSymbols); return MagickFalse; } pfx->numElements = RpnInit; pfx->usedElements = 0; pfx->Elements = NULL; pfx->Elements = (ElementT*) AcquireMagickMemory (pfx->numElements * sizeof(ElementT)); if (!pfx->Elements) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), ResourceLimitFatalError, "Elements", "%i", pfx->numElements); return MagickFalse; } pfx->usedOprStack = 0; pfx->maxUsedOprStack = 0; pfx->numOprStack = InitNumOprStack; pfx->OperatorStack = (OperatorE*) AcquireMagickMemory (pfx->numOprStack * sizeof(OperatorE)); if (!pfx->OperatorStack) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), ResourceLimitFatalError, "OperatorStack", "%i", pfx->numOprStack); return MagickFalse; } return MagickTrue; } static MagickBooleanType AllocFxRt (FxInfo * pfx, fxRtT * pfxrt) { int nRnd; int i; pfxrt->random_info = AcquireRandomInfo (); pfxrt->thisPixel = NULL; nRnd = 20 + 10 * (int) GetPseudoRandomValue (pfxrt->random_info); for (i=0; i < nRnd; i++) (void) GetPseudoRandomValue (pfxrt->random_info);; pfxrt->usedValStack = 0; pfxrt->numValStack = 2 * pfx->maxUsedOprStack; if (pfxrt->numValStack < MinValStackSize) pfxrt->numValStack = MinValStackSize; pfxrt->ValStack = (fxFltType*) AcquireMagickMemory (pfxrt->numValStack * sizeof(fxFltType)); if (!pfxrt->ValStack) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), 
ResourceLimitFatalError, "ValStack", "%i", pfxrt->numValStack); return MagickFalse; } pfxrt->UserSymVals = NULL; if (pfx->usedUserSymbols) { pfxrt->UserSymVals = (fxFltType*) AcquireMagickMemory (pfx->usedUserSymbols * sizeof(fxFltType)); if (!pfxrt->UserSymVals) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), ResourceLimitFatalError, "UserSymVals", "%i", pfx->usedUserSymbols); return MagickFalse; } for (i = 0; i < pfx->usedUserSymbols; i++) pfxrt->UserSymVals[i] = (fxFltType) 0; } return MagickTrue; } static MagickBooleanType ExtendRPN (FxInfo * pfx) { pfx->numElements = (int) ceil (pfx->numElements * (1 + TableExtend)); pfx->Elements = (ElementT*) ResizeMagickMemory (pfx->Elements, pfx->numElements * sizeof(ElementT)); if (!pfx->Elements) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), ResourceLimitFatalError, "Elements", "%i", pfx->numElements); return MagickFalse; } return MagickTrue; } static MagickBooleanType inline OprInPlace (int op) { return (op >= oAddEq && op <= oSubSub ? MagickTrue : MagickFalse); } static const char * OprStr (int oprNum) { const char * str; if (oprNum < 0) str = "bad OprStr"; else if (oprNum <= oNull) str = Operators[oprNum].str; else if (oprNum <= fNull) str = Functions[oprNum-FirstFunc].str; else if (oprNum <= aNull) str = ImgAttrs[oprNum-FirstImgAttr].str; else if (oprNum <= sNull) str = Symbols[oprNum-FirstSym].str; else if (oprNum <= rNull) str = Controls[oprNum-FirstCont].str; else { str = "bad OprStr"; } return str; } static MagickBooleanType DumpRPN (FxInfo * pfx, FILE * fh) { int i; fprintf (fh, "DumpRPN:"); fprintf (fh, " numElements=%i", pfx->numElements); fprintf (fh, " usedElements=%i", pfx->usedElements); fprintf (fh, " maxUsedOprStack=%i", pfx->maxUsedOprStack); fprintf (fh, " ImgListLen=%g", (double) pfx->ImgListLen); fprintf (fh, " NeedStats=%s", pfx->NeedStats ? "yes" : "no"); fprintf (fh, " GotStats=%s", pfx->GotStats ? "yes" : "no"); fprintf (fh, " NeedHsl=%s\n", pfx->NeedHsl ? 
"yes" : "no"); if (pfx->runType==rtEntireImage) fprintf (stderr, "EntireImage"); else if (pfx->runType==rtCornerOnly) fprintf (stderr, "CornerOnly"); fprintf (fh, "\n"); for (i=0; i < pfx->usedElements; i++) { ElementT * pel = &pfx->Elements[i]; pel->nDest = 0; } for (i=0; i < pfx->usedElements; i++) { ElementT * pel = &pfx->Elements[i]; if (pel->oprNum == rGoto || pel->oprNum == rIfZeroGoto || pel->oprNum == rIfNotZeroGoto) { if (pel->EleNdx >= 0 && pel->EleNdx < pfx->numElements) { ElementT * pelDest = &pfx->Elements[pel->EleNdx]; pelDest->nDest++; } } } for (i=0; i < pfx->usedElements; i++) { char UserSym[MagickPathExtent]; ElementT * pel = &pfx->Elements[i]; const char * str = OprStr (pel->oprNum); const char *sRelAbs = ""; if (pel->oprNum == fP || pel->oprNum == fUP || pel->oprNum == fVP || pel->oprNum == fSP) sRelAbs = pel->IsRelative ? "[]" : "{}"; if (pel->type == etColourConstant) fprintf (fh, " %i: %s vals=%.*Lg,%.*Lg,%.*Lg '%s%s' nArgs=%i ndx=%i %s", i, sElementTypes[pel->type], pfx->precision, pel->val, pfx->precision, pel->val1, pfx->precision, pel->val2, str, sRelAbs, pel->nArgs, pel->EleNdx, pel->DoPush ? "push" : "NO push"); else fprintf (fh, " %i: %s val=%.*Lg '%s%s' nArgs=%i ndx=%i %s", i, sElementTypes[pel->type], pfx->precision, pel->val, str, sRelAbs, pel->nArgs, pel->EleNdx, pel->DoPush ? 
"push" : "NO push"); if (pel->ImgAttrQual != aNull) fprintf (fh, " ia=%s", OprStr(pel->ImgAttrQual)); if (pel->ChannelQual != NO_CHAN_QUAL) { if (pel->ChannelQual == THIS_CHANNEL) fprintf (stderr, " ch=this"); else fprintf (stderr, " ch=%i", pel->ChannelQual); } if (pel->oprNum == rCopyTo) { fprintf (fh, " CopyTo ==> %s", NameOfUserSym (pfx, pel->EleNdx, UserSym)); } else if (pel->oprNum == rCopyFrom) { fprintf (fh, " CopyFrom <== %s", NameOfUserSym (pfx, pel->EleNdx, UserSym)); } else if (OprInPlace (pel->oprNum)) { fprintf (fh, " <==> %s", NameOfUserSym (pfx, pel->EleNdx, UserSym)); } if (pel->nDest > 0) fprintf (fh, " <==dest(%i)", pel->nDest); fprintf (fh, "\n"); } return MagickTrue; } static void DestroyRPN (FxInfo * pfx) { pfx->numOprStack = 0; pfx->usedOprStack = 0; if (pfx->OperatorStack) pfx->OperatorStack = (OperatorE*) RelinquishMagickMemory (pfx->OperatorStack); pfx->numElements = 0; pfx->usedElements = 0; if (pfx->Elements) pfx->Elements = (ElementT*) RelinquishMagickMemory (pfx->Elements); pfx->usedUserSymbols = 0; if (pfx->UserSymbols) pfx->UserSymbols = (UserSymbolT*) RelinquishMagickMemory (pfx->UserSymbols); } static void DestroyFxRt (fxRtT * pfxrt) { pfxrt->usedValStack = 0; if (pfxrt->ValStack) pfxrt->ValStack = (fxFltType*) RelinquishMagickMemory (pfxrt->ValStack); if (pfxrt->UserSymVals) pfxrt->UserSymVals = (fxFltType*) RelinquishMagickMemory (pfxrt->UserSymVals); pfxrt->random_info = DestroyRandomInfo (pfxrt->random_info); } static size_t GetToken (FxInfo * pfx) /* Returns length of token that starts with an alpha, or 0 if it isn't a token that starts with an alpha. j0 and j1 have trailing digit. Also colours like "gray47" have more trailing digits. After intial alpha(s) also allow single "_", eg "standard_deviation". Does not advance pfx->pex. This splits "mean.r" etc. 
*/ { char * p = pfx->pex; size_t len = 0; *pfx->token = '\0'; pfx->lenToken = 0; if (!isalpha((int)*p)) return 0; /* Regard strings that start "icc-" or "device-", followed by any number of alphas, as a token. */ if (LocaleNCompare (p, "icc-", 4) == 0) { len = 4; p += 4; while (isalpha ((int)*p)) { len++; p++; } } else if (LocaleNCompare (p, "device-", 7) == 0) { len = 7; p += 7; while (isalpha ((int)*p)) { len++; p++; } } else { while (isalpha ((int)*p)) { len++; p++; } if (*p == '_') { len++; p++; } while (isalpha ((int)*p)) { len++; p++; } while (isdigit ((int)*p)) { len++; p++; } } if (len >= MaxTokenLen) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "GetToken: too long", "%g at '%s'", (double) len, SetShortExp(pfx)); len = MaxTokenLen; } if (len) { (void) CopyMagickString (pfx->token, pfx->pex, (len+1<MaxTokenLen)?len+1:MaxTokenLen); } pfx->lenToken = strlen (pfx->token); return len; } static MagickBooleanType TokenMaybeUserSymbol (FxInfo * pfx) { char * p = pfx->token; int i = 0; while (*p) { if (!isalpha ((int)*p++)) return MagickFalse; i++; } if (i < 2) return MagickFalse; return MagickTrue; } static MagickBooleanType AddElement (FxInfo * pfx, fxFltType val, int oprNum) { ElementT * pel; assert (oprNum <= rNull); if (++pfx->usedElements >= pfx->numElements) { if (!ExtendRPN (pfx)) return MagickFalse; } pel = &pfx->Elements[pfx->usedElements-1]; pel->type = TypeOfOpr (oprNum); pel->val = val; pel->val1 = (fxFltType) 0; pel->val2 = (fxFltType) 0; pel->oprNum = oprNum; pel->DoPush = MagickTrue; pel->EleNdx = 0; pel->ChannelQual = NO_CHAN_QUAL; pel->ImgAttrQual = aNull; pel->nDest = 0; pel->pExpStart = NULL; pel->lenExp = 0; if (oprNum <= oNull) pel->nArgs = Operators[oprNum].nArgs; else if (oprNum <= fNull) pel->nArgs = Functions[oprNum-FirstFunc].nArgs; else if (oprNum <= aNull) pel->nArgs = 0; else if (oprNum <= sNull) pel->nArgs = 0; else pel->nArgs = Controls[oprNum-FirstCont].nArgs; return MagickTrue; } static 
MagickBooleanType AddAddressingElement (FxInfo * pfx, int oprNum, int EleNdx) { ElementT * pel; if (!AddElement (pfx, (fxFltType) 0, oprNum)) return MagickFalse; pel = &pfx->Elements[pfx->usedElements-1]; pel->EleNdx = EleNdx; if (oprNum == rGoto || oprNum == rIfZeroGoto || oprNum == rIfNotZeroGoto || oprNum == rZerStk) { pel->DoPush = MagickFalse; } /* Note: for() may or may not need pushing, depending on whether the value is needed, eg "for(...)+2" or debug(for(...)). */ return MagickTrue; } static MagickBooleanType AddColourElement (FxInfo * pfx, fxFltType val0, fxFltType val1, fxFltType val2) { ElementT * pel; if (!AddElement (pfx, val0, oNull)) return MagickFalse; pel = &pfx->Elements[pfx->usedElements-1]; pel->val1 = val1; pel->val2 = val2; pel->type = etColourConstant; return MagickTrue; } static void inline SkipSpaces (FxInfo * pfx) { while (isspace ((int)*pfx->pex)) pfx->pex++; } static char inline PeekChar (FxInfo * pfx) { SkipSpaces (pfx); return *pfx->pex; } static MagickBooleanType inline PeekStr (FxInfo * pfx, const char * str) { SkipSpaces (pfx); return (LocaleNCompare (pfx->pex, str, strlen(str))==0 ? MagickTrue : MagickFalse); } static MagickBooleanType ExpectChar (FxInfo * pfx, char c) { if (PeekChar (pfx) != c) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Expected char", "'%c' at '%s'", c, SetShortExp (pfx)); return MagickFalse; } pfx->pex++; return MagickTrue; } static int MaybeXYWH (FxInfo * pfx, ImgAttrE * pop) /* If ".x" or ".y" or ".width" or ".height" increments *pop and returns 1 to 4 . Otherwise returns 0. 
*/ { int ret=0; if (*pop != aPage && *pop != aPrintsize && *pop != aRes) return 0; if (PeekChar (pfx) != '.') return 0; if (!ExpectChar (pfx, '.')) return 0; (void) GetToken (pfx); if (LocaleCompare ("x", pfx->token)==0) ret=1; else if (LocaleCompare ("y", pfx->token)==0) ret=2; else if (LocaleCompare ("width", pfx->token)==0) ret=3; else if (LocaleCompare ("height", pfx->token)==0) ret=4; if (!ret) (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Invalid 'x' or 'y' or 'width' or 'height' token=", "'%s' at '%s'", pfx->token, SetShortExp(pfx)); if (*pop == aPage) (*pop) = (ImgAttrE) (*pop + ret); else { if (ret > 2) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Invalid 'width' or 'height' token=", "'%s' at '%s'", pfx->token, SetShortExp(pfx)); } else { (*pop) = (ImgAttrE) (*pop + ret); } } pfx->pex+=pfx->lenToken; return ret; } static MagickBooleanType ExtendOperatorStack (FxInfo * pfx) { pfx->numOprStack = (int) ceil (pfx->numOprStack * (1 + TableExtend)); pfx->OperatorStack = (OperatorE*) ResizeMagickMemory (pfx->OperatorStack, pfx->numOprStack * sizeof(OperatorE)); if (!pfx->OperatorStack) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), ResourceLimitFatalError, "OprStack", "%i", pfx->numOprStack); return MagickFalse; } return MagickTrue; } static MagickBooleanType PushOperatorStack (FxInfo * pfx, int op) { if (++pfx->usedOprStack >= pfx->numOprStack) { if (!ExtendOperatorStack (pfx)) return MagickFalse; } pfx->OperatorStack[pfx->usedOprStack-1] = (OperatorE) op; if (pfx->maxUsedOprStack < pfx->usedOprStack) pfx->maxUsedOprStack = pfx->usedOprStack; return MagickTrue; } static OperatorE GetLeadingOp (FxInfo * pfx) { OperatorE op = oNull; if (*pfx->pex == '-') op = oUnaryMinus; else if (*pfx->pex == '+') op = oUnaryPlus; else if (*pfx->pex == '~') op = oBitNot; else if (*pfx->pex == '!') op = oLogNot; else if (*pfx->pex == '(') op = oOpenParen; return op; } static MagickBooleanType 
inline OprIsUnaryPrefix (OperatorE op) { return (op == oUnaryMinus || op == oUnaryPlus || op == oBitNot || op == oLogNot ? MagickTrue : MagickFalse); } static MagickBooleanType TopOprIsUnaryPrefix (FxInfo * pfx) { if (!pfx->usedOprStack) return MagickFalse; return OprIsUnaryPrefix (pfx->OperatorStack[pfx->usedOprStack-1]); } static MagickBooleanType PopOprOpenParen (FxInfo * pfx, OperatorE op) { if (!pfx->usedOprStack) return MagickFalse; if (pfx->OperatorStack[pfx->usedOprStack-1] != op) return MagickFalse; pfx->usedOprStack--; return MagickTrue; } static int GetCoordQualifier (FxInfo * pfx, int op) /* Returns -1 if invalid CoordQualifier, +1 if valid and appropriate. */ { if (op != fU && op != fV && op != fS) return -1; (void) GetToken (pfx); if (pfx->lenToken != 1) { return -1; } if (*pfx->token != 'p' && *pfx->token != 'P') return -1; if (!GetFunction (pfx, fP)) return -1; return 1; } static PixelChannel GetChannelQualifier (FxInfo * pfx, int op) { if (op == fU || op == fV || op == fP || op == fUP || op == fVP || op == fS || (op >= FirstImgAttr && op <= aNull) ) { const ChannelT * pch = &Channels[0]; (void) GetToken (pfx); while (*pch->str) { if (LocaleCompare (pch->str, pfx->token)==0) { if (op >= FirstImgAttr && op <= (OperatorE)aNull && (pch->pixChan == HUE_CHANNEL || pch->pixChan == SAT_CHANNEL || pch->pixChan == LIGHT_CHANNEL) ) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Can't have image attribute with HLS qualifier at", "'%s'", SetShortExp(pfx)); return NO_CHAN_QUAL; } pfx->pex += pfx->lenToken; return pch->pixChan; } pch++; } } return NO_CHAN_QUAL; } static ImgAttrE GetImgAttrToken (FxInfo * pfx) { ImgAttrE ia = aNull; const char * iaStr; for (ia = FirstImgAttr; ia < aNull; ia=(ImgAttrE) (ia+1)) { iaStr = ImgAttrs[ia-FirstImgAttr].str; if (LocaleCompare (iaStr, pfx->token)==0) { pfx->pex += strlen(pfx->token); if (ImgAttrs[ia-FirstImgAttr].NeedStats == 1) pfx->NeedStats = MagickTrue; MaybeXYWH (pfx, &ia); break; } } 
if (ia == aPage || ia == aPrintsize || ia == aRes) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Attribute", "'%s' needs qualifier at '%s'", iaStr, SetShortExp(pfx)); } return ia; } static ImgAttrE GetImgAttrQualifier (FxInfo * pfx, int op) { ImgAttrE ia = aNull; if (op == (OperatorE)fU || op == (OperatorE)fV || op == (OperatorE)fP || op == (OperatorE)fS) { (void) GetToken (pfx); if (pfx->lenToken == 0) { return aNull; } ia = GetImgAttrToken (pfx); } return ia; } static MagickBooleanType IsQualifier (FxInfo * pfx) { if (PeekChar (pfx) == '.') { pfx->pex++; return MagickTrue; } return MagickFalse; } static ssize_t GetProperty (FxInfo * pfx, fxFltType *val) /* returns number of character to swallow. "-1" means invalid input "0" means no relevant input (don't swallow, but not an error) */ { if (PeekStr (pfx, "%[")) { int level = 0; size_t len; char sProperty [MagickPathExtent]; char * p = pfx->pex + 2; while (*p) { if (*p == '[') level++; else if (*p == ']') { if (level == 0) break; level--; } p++; } if (!*p || level != 0) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "After '%[' expected ']' at", "'%s'", SetShortExp(pfx)); return -1; } len = (size_t) (p - pfx->pex + 1); if (len > MaxTokenLen) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Too much text between '%[' and ']' at", "'%s'", SetShortExp(pfx)); return -1; } (void) CopyMagickString (sProperty, pfx->pex, len+1); sProperty[len] = '\0'; { char * tailptr; char * text; text = InterpretImageProperties (pfx->image->image_info, pfx->image, sProperty, pfx->exception); if (!text || !*text) { text = DestroyString(text); (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Unknown property", "'%s' at '%s'", sProperty, SetShortExp(pfx)); return -1; } *val = strtold (text, &tailptr); if (text == tailptr) { text = DestroyString(text); (void) ThrowMagickException ( pfx->exception, 
GetMagickModule(), OptionError, "Property", "'%s' text '%s' is not a number at '%s'", sProperty, text, SetShortExp(pfx)); return -1; } text = DestroyString(text); } return ((ssize_t) len); } return 0; } static ssize_t inline GetConstantColour (FxInfo * pfx, fxFltType *v0, fxFltType *v1, fxFltType *v2) /* Finds named colour such as "blue" and colorspace function such as "lab(10,20,30)". Returns number of characters to swallow. Return -1 means apparantly a constant colour, but with an error. Return 0 means not a constant colour, but not an error. */ { PixelInfo colour; ExceptionInfo *dummy_exception = AcquireExceptionInfo (); char *p; MagickBooleanType IsGray, IsIcc, IsDev; char ColSp[MagickPathExtent]; (void) CopyMagickString (ColSp, pfx->token, MaxTokenLen); p = ColSp + pfx->lenToken - 1; if (*p == 'a' || *p == 'A') *p = '\0'; (void) GetPixelInfo (pfx->image, &colour); /* "gray" is both a colorspace and a named colour. */ IsGray = (LocaleCompare (ColSp, "gray") == 0) ? MagickTrue : MagickFalse; IsIcc = (LocaleCompare (ColSp, "icc-color") == 0) ? MagickTrue : MagickFalse; IsDev = (LocaleNCompare (ColSp, "device-", 7) == 0) ? MagickTrue : MagickFalse; /* QueryColorCompliance will raise a warning if it isn't a colour, so we discard any exceptions. 
*/ if (!QueryColorCompliance (pfx->token, AllCompliance, &colour, dummy_exception) || IsGray) { ssize_t type = ParseCommandOption (MagickColorspaceOptions, MagickFalse, ColSp); if (type >= 0 || IsIcc || IsDev) { char * q = pfx->pex + pfx->lenToken; while (isspace((int) ((unsigned char) *q))) q++; if (*q == '(') { size_t lenfun; char sFunc[MagickPathExtent]; while (*q && *q != ')') q++; if (!*q) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "constant color missing ')'", "at '%s'", SetShortExp(pfx)); dummy_exception = DestroyExceptionInfo (dummy_exception); return -1; } lenfun = (size_t) (q - pfx->pex + 1); if (lenfun > MaxTokenLen) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "lenfun too long", "'%lu' at '%s'", lenfun, SetShortExp(pfx)); dummy_exception = DestroyExceptionInfo (dummy_exception); return -1; } (void) CopyMagickString (sFunc, pfx->pex, lenfun+1); if (QueryColorCompliance (sFunc, AllCompliance, &colour, dummy_exception)) { *v0 = colour.red / QuantumRange; *v1 = colour.green / QuantumRange; *v2 = colour.blue / QuantumRange; dummy_exception = DestroyExceptionInfo (dummy_exception); return (ssize_t)lenfun; } } else { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "colorspace but not a valid color with '(...)' at", "'%s'", SetShortExp(pfx)); dummy_exception = DestroyExceptionInfo (dummy_exception); return -1; } } if (!IsGray) { dummy_exception = DestroyExceptionInfo (dummy_exception); return 0; } } *v0 = colour.red / QuantumRange; *v1 = colour.green / QuantumRange; *v2 = colour.blue / QuantumRange; dummy_exception = DestroyExceptionInfo (dummy_exception); return (ssize_t)strlen (pfx->token); } static ssize_t inline GetHexColour (FxInfo * pfx, fxFltType *v0, fxFltType *v1, fxFltType *v2) /* Returns number of characters to swallow. Negative return means it starts with '#', but invalid hex number. 
*/ { char * p; size_t len; PixelInfo colour; if (*pfx->pex != '#') return 0; /* find end of hex digits. */ p = pfx->pex + 1; while (isxdigit ((int)*p)) p++; if (isalpha ((int)*p)) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Bad hex number at", "'%s'", SetShortExp(pfx)); return -1; } len = (size_t) (p - pfx->pex); if (len < 1) return 0; if (len >= MaxTokenLen) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Hex colour too long at", "'%s'", SetShortExp(pfx)); return -1; } (void) CopyMagickString (pfx->token, pfx->pex, len+1); (void) GetPixelInfo (pfx->image, &colour); if (!QueryColorCompliance (pfx->token, AllCompliance, &colour, pfx->exception)) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "QueryColorCompliance rejected", "'%s' at '%s'", pfx->token, SetShortExp(pfx)); return -1; } *v0 = colour.red / QuantumRange; *v1 = colour.green / QuantumRange; *v2 = colour.blue / QuantumRange; return (ssize_t) len; } static MagickBooleanType GetFunction (FxInfo * pfx, FunctionE fe) { /* A function, so get open-parens, n args, close-parens */ const char * funStr = Functions[fe-FirstFunc].str; int nArgs = Functions[fe-FirstFunc].nArgs; char chLimit = ')'; char expChLimit = ')'; const char *strLimit = ",)"; OperatorE pushOp = oOpenParen; char * pExpStart; int lenExp = 0; int FndArgs = 0; int ndx0 = NULL_ADDRESS, ndx1 = NULL_ADDRESS, ndx2 = NULL_ADDRESS, ndx3 = NULL_ADDRESS; MagickBooleanType coordQual = MagickFalse; PixelChannel chQual = NO_CHAN_QUAL; ImgAttrE iaQual = aNull; pfx->pex += pfx->lenToken; if (fe == fP) { char p = PeekChar (pfx); if (p=='{') { (void) ExpectChar (pfx, '{'); pushOp = oOpenBrace; strLimit = ",}"; chLimit = '}'; expChLimit = '}'; } else if (p=='[') { (void) ExpectChar (pfx, '['); pushOp = oOpenBracket; strLimit = ",]"; chLimit = ']'; expChLimit = ']'; } else { nArgs = 0; chLimit = ']'; expChLimit = ']'; } } else if (fe == fU) { char p = PeekChar (pfx); 
if (p=='[') { (void) ExpectChar (pfx, '['); pushOp = oOpenBracket; strLimit = ",]"; chLimit = ']'; expChLimit = ']'; } else { nArgs = 0; chLimit = ']'; expChLimit = ']'; } } else if (fe == fV || fe == fS) { nArgs = 0; pushOp = oOpenBracket; chLimit = ']'; expChLimit = ']'; } else { if (!ExpectChar (pfx, '(')) return MagickFalse; } if (!PushOperatorStack (pfx, pushOp)) return MagickFalse; pExpStart = pfx->pex; ndx0 = pfx->usedElements; if (fe==fDo) { (void) AddAddressingElement (pfx, rGoto, NULL_ADDRESS); /* address will be ndx1+1 */ } while (nArgs > 0) { int FndOne = 0; if (TranslateStatementList (pfx, strLimit, &chLimit)) { FndOne = 1; } else { /* Maybe don't break because other expressions may be not empty. */ if (!chLimit) break; if (fe == fP || fe == fS|| fe == fIf) { (void) AddElement (pfx, (fxFltType) 0, oNull); FndOne = 1; } } if (strchr (strLimit, chLimit)==NULL) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "For function", "'%s' expected one of '%s' after expression but found '%c' at '%s'", funStr, strLimit, chLimit ? 
chLimit : ' ', SetShortExp(pfx)); return MagickFalse; } if (FndOne) { FndArgs++; nArgs--; } switch (FndArgs) { case 1: ndx1 = pfx->usedElements; if (fe==fWhile) { (void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS); /* address will be ndx2+1 */ } else if (fe==fDo) { (void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS); /* address will be ndx2+1 */ } else if (fe==fFor) { pfx->Elements[pfx->usedElements-1].DoPush = MagickFalse; } else if (fe==fIf) { (void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS); /* address will be ndx2 + 1 */ pfx->Elements[pfx->usedElements-1].DoPush = MagickTrue; /* we may need return from if() */ } break; case 2: ndx2 = pfx->usedElements; if (fe==fWhile) { pfx->Elements[pfx->usedElements-1].DoPush = MagickFalse; (void) AddAddressingElement (pfx, rGoto, ndx0); } else if (fe==fDo) { pfx->Elements[pfx->usedElements-1].DoPush = MagickFalse; (void) AddAddressingElement (pfx, rGoto, ndx0 + 1); } else if (fe==fFor) { (void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS); /* address will be ndx3 */ pfx->Elements[pfx->usedElements-1].DoPush = MagickTrue; /* we may need return from for() */ (void) AddAddressingElement (pfx, rZerStk, NULL_ADDRESS); } else if (fe==fIf) { (void) AddAddressingElement (pfx, rGoto, NULL_ADDRESS); /* address will be ndx3 */ } break; case 3: if (fe==fFor) { pfx->Elements[pfx->usedElements-1].DoPush = MagickFalse; (void) AddAddressingElement (pfx, rGoto, ndx1); } ndx3 = pfx->usedElements; break; default: break; } if (chLimit == expChLimit) { lenExp = pfx->pex - pExpStart - 1; break; } } /* end while args of a function */ if (chLimit && chLimit != expChLimit && chLimit != ',' ) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "For function", "'%s' expected '%c', found '%c' at '%s'", funStr, expChLimit, chLimit ? 
chLimit : ' ', SetShortExp(pfx)); return MagickFalse; } if (fe == fP || fe == fS || fe == fU) { while (FndArgs < Functions[fe-FirstFunc].nArgs) { (void) AddElement (pfx, (fxFltType) 0, oNull); FndArgs++; } } if (FndArgs > Functions[fe-FirstFunc].nArgs) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "For function", "'%s' expected %i arguments, found '%i' at '%s'", funStr, Functions[fe-FirstFunc].nArgs, FndArgs, SetShortExp(pfx)); return MagickFalse; } if (FndArgs < Functions[fe-FirstFunc].nArgs) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "For function", "'%s' expected %i arguments, found too few (%i) at '%s'", funStr, Functions[fe-FirstFunc].nArgs, FndArgs, SetShortExp(pfx)); return MagickFalse; } if (fe != fS && fe != fV && FndArgs == 0 && Functions[fe-FirstFunc].nArgs == 0) { /* This is for "rand()" and similar. */ chLimit = expChLimit; if (!ExpectChar (pfx, ')')) return MagickFalse; } if (chLimit != expChLimit) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "For function", "'%s', arguments don't end with '%c' at '%s'", funStr, expChLimit, SetShortExp(pfx)); return MagickFalse; } if (!PopOprOpenParen (pfx, pushOp)) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Bug: For function", "'%s' tos not '%s' at '%s'", funStr, Operators[pushOp].str, SetShortExp(pfx)); return MagickFalse; } if (IsQualifier (pfx)) { if (fe == fU || fe == fV || fe == fS) { coordQual = (GetCoordQualifier (pfx, fe) == 1) ? MagickTrue : MagickFalse; if (coordQual) { /* Remove last element, which should be fP */ ElementT * pel = &pfx->Elements[pfx->usedElements-1]; if (pel->oprNum != fP) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Bug: For function", "'%s' last element not 'p' at '%s'", funStr, SetShortExp(pfx)); return MagickFalse; } chQual = pel->ChannelQual; expChLimit = (pel->IsRelative) ? 
']' : '}'; pfx->usedElements--; if (fe == fU) fe = fUP; else if (fe == fV) fe = fVP; else if (fe == fS) fe = fSP; funStr = Functions[fe-FirstFunc].str; } } if ( chQual == NO_CHAN_QUAL && (fe == fP || fe == fS || fe == fSP || fe == fU || fe == fUP || fe == fV || fe == fVP) ) { chQual = GetChannelQualifier (pfx, fe); } if (chQual == NO_CHAN_QUAL && (fe == fU || fe == fV || fe == fS)) { /* Note: we don't allow "p.mean" etc. */ iaQual = GetImgAttrQualifier (pfx, fe); } if (IsQualifier (pfx) && chQual == NO_CHAN_QUAL && iaQual != aNull) { chQual = GetChannelQualifier (pfx, fe); } if (coordQual && iaQual != aNull) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "For function", "'%s', can't have qualifiers 'p' and image attribute '%s' at '%s'", funStr, pfx->token, SetShortExp(pfx)); return MagickFalse; } if (!coordQual && chQual == NO_CHAN_QUAL && iaQual == aNull) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "For function", "'%s', bad qualifier '%s' at '%s'", funStr, pfx->token, SetShortExp(pfx)); return MagickFalse; } if (!coordQual && chQual == CompositePixelChannel && iaQual == aNull) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "For function", "'%s', bad composite qualifier '%s' at '%s'", funStr, pfx->token, SetShortExp(pfx)); return MagickFalse; } if (chQual == HUE_CHANNEL || chQual == SAT_CHANNEL || chQual == LIGHT_CHANNEL) { pfx->NeedHsl = MagickTrue; if (iaQual >= FirstImgAttr && iaQual < aNull) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Can't have image attribute with HLS qualifier at", "'%s'", SetShortExp(pfx)); return MagickFalse; } } } if (fe==fWhile) { pfx->Elements[ndx1].EleNdx = ndx2+1; } else if (fe==fDo) { pfx->Elements[ndx0].EleNdx = ndx1+1; pfx->Elements[ndx1].EleNdx = ndx2+1; } else if (fe==fFor) { pfx->Elements[ndx2].EleNdx = ndx3; } else if (fe==fIf) { pfx->Elements[ndx1].EleNdx = ndx2 + 1; 
pfx->Elements[ndx2].EleNdx = ndx3; } else { if (fe == fU && iaQual == aNull) { ElementT * pel = &pfx->Elements[pfx->usedElements-1]; if (pel->type == etConstant && pel->val == 0.0) { pfx->usedElements--; fe = fU0; } } (void) AddElement (pfx, (fxFltType) 0, fe); if (fe == fP || fe == fU || fe == fU0 || fe == fUP || fe == fV || fe == fVP || fe == fS || fe == fSP) { ElementT * pel = &pfx->Elements[pfx->usedElements-1]; pel->IsRelative = (expChLimit == ']' ? MagickTrue : MagickFalse); if (chQual >= 0) pel->ChannelQual = chQual; if (iaQual != aNull && (fe == fU || fe == fV || fe == fS)) { /* Note: we don't allow "p[2,3].mean" or "p.mean" etc. */ pel->ImgAttrQual = iaQual; } } } if (pExpStart && lenExp) { ElementT * pel = &pfx->Elements[pfx->usedElements-1]; pel->pExpStart = pExpStart; pel->lenExp = lenExp; } if (fe == fDebug) pfx->ContainsDebug = MagickTrue; return MagickTrue; } static MagickBooleanType IsStealth (int op) { return (op == fU0 || op == fUP || op == fSP || op == fVP || (op >= FirstCont && op <= rNull) ? 
MagickTrue : MagickFalse );
} /* end of IsStealth (declared above) */

/* Parses one operand at pfx->pex and emits the corresponding element(s).
   An operand is one of: a parenthesized sub-expression, a unary prefix
   followed by an operand, a "#rrggbb"-style hex colour, a numeric constant
   (with optional metric/binary suffix such as k, Ki, M), a "%[...]" property,
   a named constant, a function call, an image attribute, a symbol, a named
   colour, an image artifact, or a user symbol.
   Outputs:
     *UserSymbol    - operand is a user symbol
     *NewUserSymbol - operand is a user symbol seen for the first time
     *UserSymNdx    - index of the user symbol, or NULL_ADDRESS
     *needPopAll    - set when a flow-control function (do/for/if/while)
                      requires the value stack to be cleared
   Returns MagickFalse on error (an exception will have been raised).
*/
static MagickBooleanType GetOperand (
  FxInfo * pfx, MagickBooleanType * UserSymbol, MagickBooleanType * NewUserSymbol,
  int * UserSymNdx, MagickBooleanType * needPopAll)
{

  *NewUserSymbol = *UserSymbol = MagickFalse;
  *UserSymNdx = NULL_ADDRESS;

  SkipSpaces (pfx);
  if (!*pfx->pex) return MagickFalse;
  (void) GetToken (pfx);

  if (pfx->lenToken==0) {

    /* Try '(' or unary prefix */
    OperatorE op = GetLeadingOp (pfx);
    if (op==oOpenParen) {
      char chLimit = '\0';
      if (!PushOperatorStack (pfx, op)) return MagickFalse;
      pfx->pex++;
      if (!TranslateExpression (pfx, ")", &chLimit, needPopAll)) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Empty expression in parentheses at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      }
      if (chLimit != ')') {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "'(' but no ')' at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      }
      /* Top of opr stack should be '('. */
      if (!PopOprOpenParen (pfx, oOpenParen)) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Bug: tos not '(' at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      }
      return MagickTrue;
    } else if (OprIsUnaryPrefix (op)) {
      if (!PushOperatorStack (pfx, op)) return MagickFalse;
      pfx->pex++;
      SkipSpaces (pfx);
      if (!*pfx->pex) return MagickFalse;

      /* Recurse for the operand the prefix applies to. */
      if (!GetOperand (pfx, UserSymbol, NewUserSymbol, UserSymNdx, needPopAll)) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "After unary, bad operand at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      }
      if (*NewUserSymbol) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "After unary, NewUserSymbol at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      }
      if (*UserSymbol) {
        /* Materialize the symbol's value now so the unary applies to it. */
        (void) AddAddressingElement (pfx, rCopyFrom, *UserSymNdx);
        *UserSymNdx = NULL_ADDRESS;

        *UserSymbol = MagickFalse;
        *NewUserSymbol = MagickFalse;
      }
      (void) GetToken (pfx);
      return MagickTrue;
    } else if (*pfx->pex == '#') {
      /* Hex colour, eg "#rrggbb" -> three-component colour element. */
      fxFltType v0=0, v1=0, v2=0;
      ssize_t lenToken = GetHexColour (pfx, &v0, &v1, &v2);
      if (lenToken < 0) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Bad hex number at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      } else if (lenToken > 0) {
        (void) AddColourElement (pfx, v0, v1, v2);
        pfx->pex+=lenToken;
      }
      return MagickTrue;
    }

    /* Try a constant number. */
    {
      char * tailptr;
      ssize_t lenOptArt;
      fxFltType val = strtold (pfx->pex, &tailptr);
      if (pfx->pex != tailptr) {
        pfx->pex = tailptr;
        if (*tailptr) {
          /* Could have "prefix" K, Ki, M etc.
             See https://en.wikipedia.org/wiki/Metric_prefix
             and https://en.wikipedia.org/wiki/Binary_prefix
          */
          double Pow = 0.0;
          /* Index into Prefices gives the power of 10 ('.' is a hole at 10^0+). */
          const char Prefices[] = "yzafpnum.kMGTPEZY";
          const char * pSi = strchr (Prefices, *tailptr);
          if (pSi && *pSi != '.') Pow = (pSi - Prefices) * 3 - 24;
          else if (*tailptr == 'c') Pow = -2;
          else if (*tailptr == 'h') Pow =  2;
          else if (*tailptr == 'k') Pow =  3;
          if (Pow != 0.0) {
            if (*(++pfx->pex) == 'i') {
              /* Binary prefix: eg "Ki" is 2^10; Pow/0.3 maps 3->10, 6->20, ... */
              val *= pow (2.0, Pow/0.3);
              pfx->pex++;
            } else {
              val *= pow (10.0, Pow);
            }
          }
        }
        (void) AddElement (pfx, val, oNull);
        return MagickTrue;
      }

      val = (fxFltType) 0;
      lenOptArt = GetProperty (pfx, &val);
      if (lenOptArt < 0) return MagickFalse;
      if (lenOptArt > 0) {
        (void) AddElement (pfx, val, oNull);
        pfx->pex += lenOptArt;
        return MagickTrue;
      }
    }

  } /* end of lenToken==0 */

  if (pfx->lenToken > 0) {
    /* Try a constant */
    {
      ConstantE ce;
      for (ce = (ConstantE)0; ce < cNull; ce=(ConstantE) (ce+1)) {
        const char * ceStr = Constants[ce].str;
        if (LocaleCompare (ceStr, pfx->token)==0) {
          break;
        }
      }
      if (ce != cNull) {
        (void) AddElement (pfx, Constants[ce].val, oNull);
        pfx->pex += pfx->lenToken;
        return MagickTrue;
      }
    }

    /* Try a function */
    {
      FunctionE fe;
      for (fe = FirstFunc; fe < fNull; fe=(FunctionE) (fe+1)) {
        const char * feStr = Functions[fe-FirstFunc].str;
        if (LocaleCompare (feStr, pfx->token)==0) {
          break;
        }
      }

      if (fe == fV && pfx->ImgListLen < 2) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Symbol 'v' but fewer than two images at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      }

      /* Internal-only pseudo functions may not appear in user expressions. */
      if (IsStealth (fe)) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Function", "'%s' not permitted at '%s'",
          pfx->token, SetShortExp(pfx));
      }

      if (fe == fDo || fe == fFor || fe == fIf || fe == fWhile) {
        *needPopAll = MagickTrue;
      }

      if (fe != fNull) return (GetFunction (pfx, fe));
    }

    /* Try image attribute */
    {
      ImgAttrE ia = GetImgAttrToken (pfx);
      if (ia != aNull) {
        fxFltType val = 0;
        (void) AddElement (pfx, val, ia);

        if (ImgAttrs[ia-FirstImgAttr].NeedStats==1) {
          if (IsQualifier (pfx)) {
            PixelChannel chQual = GetChannelQualifier (pfx, ia);
            ElementT * pel;
            if (chQual == NO_CHAN_QUAL) {
              (void) ThrowMagickException (
                pfx->exception, GetMagickModule(), OptionError,
                "Bad channel qualifier at", "'%s'",
                SetShortExp(pfx));
              return MagickFalse;
            }
            /* Adjust the element */
            pel = &pfx->Elements[pfx->usedElements-1];
            pel->ChannelQual = chQual;
          }
        }
        return MagickTrue;
      }
    }

    /* Try symbol */
    {
      SymbolE se;
      for (se = FirstSym; se < sNull; se=(SymbolE) (se+1)) {
        const char * seStr = Symbols[se-FirstSym].str;
        if (LocaleCompare (seStr, pfx->token)==0) {
          break;
        }
      }
      if (se != sNull) {
        fxFltType val = 0;
        (void) AddElement (pfx, val, se);
        pfx->pex += pfx->lenToken;

        if (se==sHue || se==sSaturation || se==sLightness) pfx->NeedHsl = MagickTrue;
        return MagickTrue;
      }
    }

    /* Try constant colour. */
    {
      fxFltType v0, v1, v2;
      ssize_t ColLen = GetConstantColour (pfx, &v0, &v1, &v2);
      if (ColLen < 0) return MagickFalse;
      if (ColLen > 0) {
        (void) AddColourElement (pfx, v0, v1, v2);
        pfx->pex+=ColLen;
        return MagickTrue;
      }
    }

    /* Try image artifact. */
    {
      const char *artifact;
      artifact = GetImageArtifact (pfx->image, pfx->token);
      if (artifact != (const char *) NULL) {
        char * tailptr;
        fxFltType val = strtold (artifact, &tailptr);
        if (pfx->token == tailptr) {
          (void) ThrowMagickException (
            pfx->exception, GetMagickModule(), OptionError,
            "Artifact", "'%s' has value '%s', not a number, at '%s'",
            pfx->token, artifact, SetShortExp(pfx));
          return MagickFalse;
        }
        (void) AddElement (pfx, val, oNull);
        pfx->pex+=pfx->lenToken;
        return MagickTrue;
      }
    }

    /* Try user symbols. If it is, don't AddElement yet. */
    if (TokenMaybeUserSymbol (pfx)) {
      *UserSymbol = MagickTrue;
      *UserSymNdx = FindUserSymbol (pfx, pfx->token);
      if (*UserSymNdx == NULL_ADDRESS) {
        *UserSymNdx = AddUserSymbol (pfx, pfx->pex, pfx->lenToken);
        *NewUserSymbol = MagickTrue;
      } else {
        /* Existing user symbol: nothing to emit yet. */
      }
      pfx->pex += pfx->lenToken;

      return MagickTrue;
    }
  }

  (void) ThrowMagickException (
    pfx->exception, GetMagickModule(), OptionError,
    "Expected operand at", "'%s'",
    SetShortExp(pfx));

  return MagickFalse;
}

/* True for operators that do real computation, ie everything outside the
   bracketing range oOpenParen..oCloseBrace. */
static MagickBooleanType inline IsRealOperator (OperatorE op)
{
  return (op < oOpenParen || op > oCloseBrace) ? MagickTrue : MagickFalse;
}

static MagickBooleanType inline ProcessTernaryOpr (FxInfo * pfx, TernaryT * ptern)
/* Ternary operator "... ? ... : ..."
   Records the jump-element addresses for '?' and ':' in *ptern.
   Returns false iff we have exception.
*/
{
  if (pfx->usedOprStack == 0) return MagickFalse;
  if (pfx->OperatorStack[pfx->usedOprStack-1] == oQuery) {
    if (ptern->addrQuery != NULL_ADDRESS) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Already have '?' in sub-expression at", "'%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }
    if (ptern->addrColon != NULL_ADDRESS) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Already have ':' in sub-expression at", "'%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }
    pfx->usedOprStack--;
    ptern->addrQuery = pfx->usedElements;
    (void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS);
    /* address will be one after the Colon address.
    */
  } else if (pfx->OperatorStack[pfx->usedOprStack-1] == oColon) {
    if (ptern->addrQuery == NULL_ADDRESS) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Need '?' in sub-expression at", "'%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }
    if (ptern->addrColon != NULL_ADDRESS) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Already have ':' in sub-expression at", "'%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }
    pfx->usedOprStack--;
    ptern->addrColon = pfx->usedElements;
    pfx->Elements[pfx->usedElements-1].DoPush = MagickTrue;
    (void) AddAddressingElement (pfx, rGoto, NULL_ADDRESS);
    /* address will be after the subexpression */
  }
  return MagickTrue;
}

/* Scans the next operator at pfx->pex.
   Reports through the out-parameters whether it is a plain assignment
   (*Assign), an in-place update such as "+=" (*Update), or "++"/"--"
   (*IncrDecr).  Pops higher-precedence operators off the operator stack
   into the element list before pushing the new one; a close paren pops
   its matching open paren instead of being pushed.
   Returns MagickFalse (with an exception raised) on error.
*/
static MagickBooleanType GetOperator (
  FxInfo * pfx,
  MagickBooleanType * Assign, MagickBooleanType * Update, MagickBooleanType * IncrDecr)
{
  OperatorE op;
  size_t len = 0;
  MagickBooleanType DoneIt = MagickFalse;
  SkipSpaces (pfx);
  for (op = (OperatorE)0; op != oNull; op=(OperatorE) (op+1)) {
    const char * opStr = Operators[op].str;
    len = strlen(opStr);
    if (LocaleNCompare (opStr, pfx->pex, len)==0) {
      break;
    }
  }

  if (!IsRealOperator (op)) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Not a real operator at", "'%s'",
      SetShortExp(pfx));
    return MagickFalse;
  }

  if (op==oNull) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Expected operator at", "'%s'",
      SetShortExp(pfx));
    return MagickFalse;
  }

  *Assign = (op==oAssign) ? MagickTrue : MagickFalse;
  *Update = OprInPlace (op);
  *IncrDecr = (op == oPlusPlus || op == oSubSub) ? MagickTrue : MagickFalse;

  /* while top of OperatorStack is not empty and is not open-parens or assign,
       and top of OperatorStack is higher precedence than new op,
     then move top of OperatorStack to Element list.
  */

  while (pfx->usedOprStack > 0) {
    OperatorE top = pfx->OperatorStack[pfx->usedOprStack-1];
    int precTop, precNew;
    if (top == oOpenParen || top == oAssign || OprInPlace (top)) break;
    precTop = Operators[top].precedence;
    precNew = Operators[op].precedence;
    /* Assume left associativity.
       If right assoc, this would be "<=".
    */
    if (precTop < precNew) break;
    (void) AddElement (pfx, (fxFltType) 0, top);
    pfx->usedOprStack--;
  }

  /* If new op is close paren, and stack top is open paren,
     remove stack top.
  */
  if (op==oCloseParen) {
    if (pfx->usedOprStack == 0) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Found ')' but nothing on stack at", "'%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }

    if (pfx->OperatorStack[pfx->usedOprStack-1] != oOpenParen) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Found ')' but no '(' on stack at", "'%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }
    pfx->usedOprStack--;
    DoneIt = MagickTrue;
  }

  if (!DoneIt) {
    if (!PushOperatorStack (pfx, op)) return MagickFalse;
  }

  pfx->pex += len;

  return MagickTrue;
}

/* Patches the placeholder jump addresses recorded by ProcessTernaryOpr:
   the '?' jump lands one past the ':' jump element, and the ':' jump lands
   after the whole sub-expression.  Raises an exception if only one of
   '?' / ':' was seen.
*/
static MagickBooleanType ResolveTernaryAddresses (FxInfo * pfx, TernaryT * ptern)
{
  if (ptern->addrQuery == NULL_ADDRESS && ptern->addrColon == NULL_ADDRESS)
    return MagickTrue;

  if (ptern->addrQuery != NULL_ADDRESS && ptern->addrColon != NULL_ADDRESS) {
    pfx->Elements[ptern->addrQuery].EleNdx = ptern->addrColon + 1;
    pfx->Elements[ptern->addrColon].EleNdx = pfx->usedElements;
    ptern->addrQuery = NULL_ADDRESS;
    ptern->addrColon = NULL_ADDRESS;
  } else if (ptern->addrQuery != NULL_ADDRESS) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "'?' with no corresponding ':'", "'%s' at '%s'",
        pfx->token, SetShortExp(pfx));
      return MagickFalse;
  } else if (ptern->addrColon != NULL_ADDRESS) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "':' with no corresponding '?'", "'%s' at '%s'",
        pfx->token, SetShortExp(pfx));
      return MagickFalse;
  }
  return MagickTrue;
}

/* Translates one expression from pfx->pex into elements, stopping at any
   character in strLimit (the terminator found is returned in *chLimit,
   '\0' if none).  Alternates GetOperand / GetOperator until a limit
   character or end of input.
*/
static MagickBooleanType TranslateExpression (
  FxInfo * pfx, const char * strLimit, char * chLimit, MagickBooleanType * needPopAll)
{
  /* There should be only one New per expression (oAssign), but can be many Old.
  */
  MagickBooleanType UserSymbol, NewUserSymbol;
  int UserSymNdx0, UserSymNdx1;

  MagickBooleanType
    Assign = MagickFalse,
    Update = MagickFalse,
    IncrDecr = MagickFalse;

  int StartEleNdx;

  TernaryT ternary;
  ternary.addrQuery = NULL_ADDRESS;
  ternary.addrColon = NULL_ADDRESS;

  pfx->teDepth++;

  *chLimit = '\0';

  StartEleNdx = pfx->usedElements-1;
  if (StartEleNdx < 0) StartEleNdx = 0;

  SkipSpaces (pfx);

  if (!*pfx->pex) {
    pfx->teDepth--;
    return MagickFalse;
  }

  /* An immediate limit character means an empty expression. */
  if (strchr(strLimit,*pfx->pex)!=NULL) {
    *chLimit = *pfx->pex;
    pfx->pex++;
    pfx->teDepth--;

    return MagickFalse;
  }

  if (!GetOperand (pfx, &UserSymbol, &NewUserSymbol, &UserSymNdx0, needPopAll)) return MagickFalse;
  SkipSpaces (pfx);

  /* Loop through Operator, Operand, Operator, Operand, ...
  */
  while (*pfx->pex && (!*strLimit || (strchr(strLimit,*pfx->pex)==NULL))) {
    if (!GetOperator (pfx, &Assign, &Update, &IncrDecr)) return MagickFalse;
    SkipSpaces (pfx);
    if (NewUserSymbol && !Assign) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Expected assignment after new UserSymbol", "'%s' at '%s'",
        pfx->token, SetShortExp(pfx));
      return MagickFalse;
    }
    if (!UserSymbol && Assign) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Attempted assignment to non-UserSymbol", "'%s' at '%s'",
        pfx->token, SetShortExp(pfx));
      return MagickFalse;
    }
    if (!UserSymbol && Update) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Attempted update to non-UserSymbol", "'%s' at '%s'",
        pfx->token, SetShortExp(pfx));
      return MagickFalse;
    }
    if (UserSymbol && (Assign || Update) && !IncrDecr) {
      /* The right-hand side of an assignment/update is a whole expression. */
      if (!TranslateExpression (pfx, strLimit, chLimit, needPopAll)) return MagickFalse;
      if (!*pfx->pex) break;
      if (!*strLimit) break;
      if (strchr(strLimit,*chLimit)!=NULL) break;
    }
    if (UserSymbol && !Assign && !Update && UserSymNdx0 != NULL_ADDRESS) {
      /* Plain read of a user symbol: emit a copy-from its slot. */
      ElementT * pel;
      (void) AddAddressingElement (pfx, rCopyFrom, UserSymNdx0);
      UserSymNdx0 = NULL_ADDRESS;
      pel = &pfx->Elements[pfx->usedElements-1];
      pel->DoPush = MagickTrue;
    }

    if (UserSymbol) {
      /* Flush any pending unary prefixes now that the value is available. */
      while (TopOprIsUnaryPrefix (pfx)) {
        OperatorE op = pfx->OperatorStack[pfx->usedOprStack-1];
        (void) AddElement (pfx, (fxFltType) 0, op);
        pfx->usedOprStack--;
      }
    }
    if (!ProcessTernaryOpr (pfx, &ternary)) return MagickFalse;

    if (ternary.addrColon != NULL_ADDRESS) {
      /* After ':' comes the "false" arm; translate it then stop. */
      if (!TranslateExpression (pfx, ",);", chLimit, needPopAll)) return MagickFalse;
      break;
    }

    UserSymbol = NewUserSymbol = MagickFalse;

    if ( (!*pfx->pex) || (*strLimit && (strchr(strLimit,*pfx->pex)!=NULL) ) ) {
      if (IncrDecr) break;

      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Expected operand after operator", "at '%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }

    if (IncrDecr) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "'++' and '--' must be the final operators in an expression at", "'%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }

    if (!GetOperand (pfx, &UserSymbol, &NewUserSymbol, &UserSymNdx1, needPopAll)) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Expected operand at", "'%s'",
        SetShortExp(pfx));
      return MagickFalse;
    }
    SkipSpaces (pfx);
    if (NewUserSymbol && !Assign) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "NewUserSymbol", "'%s' after non-assignment operator at '%s'",
        pfx->token, SetShortExp(pfx));
      return MagickFalse;
    }
    if (UserSymbol && !NewUserSymbol) {
      (void) AddAddressingElement (pfx, rCopyFrom, UserSymNdx1);
      UserSymNdx1 = NULL_ADDRESS;
    }
    UserSymNdx0 = UserSymNdx1;
  }

  /* A trailing user-symbol read that was never materialized. */
  if (UserSymbol && !Assign && !Update && UserSymNdx0 != NULL_ADDRESS) {
    ElementT * pel;
    if (NewUserSymbol) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "NewUserSymbol", "'%s' needs assignment operator at '%s'",
        pfx->token, SetShortExp(pfx));
      return MagickFalse;
    }
    (void) AddAddressingElement (pfx, rCopyFrom, UserSymNdx0);
    pel = &pfx->Elements[pfx->usedElements-1];
    pel->DoPush = MagickTrue;
  }

  if (*pfx->pex && !*chLimit && (strchr(strLimit,*pfx->pex)!=NULL)) {
    *chLimit = *pfx->pex;
    pfx->pex++;
  }
  /* Drain the operator stack down to any bracketing or assignment marker. */
  while (pfx->usedOprStack) {
    OperatorE op = pfx->OperatorStack[pfx->usedOprStack-1];
    if (op == oOpenParen || op == oOpenBracket || op == oOpenBrace) {
      break;
    }
    if ( (op==oAssign && !Assign) || (OprInPlace(op) && !Update) ) {
      break;
    }
    pfx->usedOprStack--;
    (void) AddElement (pfx, (fxFltType) 0, op);
    if (op == oAssign) {
      /* Adjust last element, by deletion and add.
      */
      pfx->usedElements--;
      (void) AddAddressingElement (pfx, rCopyTo, UserSymNdx0);
      break;
    } else if (OprInPlace (op)) {
      /* Modify latest element.
      */
      pfx->Elements[pfx->usedElements-1].EleNdx = UserSymNdx0;
      break;
    }
  }

  (void) ResolveTernaryAddresses (pfx, &ternary);

  pfx->teDepth--;

  if (!pfx->teDepth && *needPopAll) {
    (void) AddAddressingElement (pfx, rZerStk, NULL_ADDRESS);
    *needPopAll = MagickFalse;
  }
  if (pfx->exception->severity != UndefinedException) return MagickFalse;
  return MagickTrue;
}

/* Translates a single statement (one expression up to a limit character).
   If the statement ended with ';', its result is not pushed.
*/
static MagickBooleanType TranslateStatement (FxInfo * pfx, char * strLimit, char * chLimit)
{
  MagickBooleanType NeedPopAll = MagickFalse;

  SkipSpaces (pfx);

  if (!*pfx->pex) return MagickFalse;

  if (!TranslateExpression (pfx, strLimit, chLimit, &NeedPopAll)) {
    return MagickFalse;
  }
  if (pfx->usedElements && *chLimit==';') {
    /* FIXME: not necessarily the last element,
       but the last _executed_ element,
       eg "goto" in a "for()".  Pending a fix, we will use rZerStk.
    */
    ElementT * pel = &pfx->Elements[pfx->usedElements-1];
    if (pel->DoPush) pel->DoPush = MagickFalse;
  }

  return MagickTrue;
}

/* Translates a ';'-separated list of statements, stopping at any character
   in strLimit (';' is always treated as a separator, so it is appended to
   the limit set if absent).
*/
static MagickBooleanType TranslateStatementList (FxInfo * pfx, const char * strLimit, char * chLimit)
{
#define MAX_SLIMIT 10
  char sLimits[MAX_SLIMIT];
  SkipSpaces (pfx);

  if (!*pfx->pex) return MagickFalse;

  (void) CopyMagickString (sLimits, strLimit, MAX_SLIMIT-1);

  if (strchr(strLimit,';')==NULL)
    (void) ConcatenateMagickString (sLimits, ";", MAX_SLIMIT);

  for (;;) {
    if (!TranslateStatement (pfx, sLimits, chLimit)) return MagickFalse;

    if (!*pfx->pex) break;

    if (*chLimit != ';') {
      break;
    }
  }

  if (pfx->exception->severity != UndefinedException) return MagickFalse;
  return MagickTrue;
}

/*--------------------------------------------------------------------
   Run-time
*/

/* Fetches per-channel statistics for img, scaled from quantum range to 0..1.
   Caller owns the returned array.
*/
static ChannelStatistics *CollectOneImgStats (FxInfo * pfx, Image * img)
{
  int ch;
  ChannelStatistics * cs = GetImageStatistics (img, pfx->exception);
  /* Use RelinquishMagickMemory() somewhere.
  */
  /* Scale every statistic from quantum range to the 0..1 range used by fx. */
  for (ch=0; ch <= (int) MaxPixelChannels; ch++) {
    cs[ch].mean *= QuantumScale;
    cs[ch].median *= QuantumScale;
    cs[ch].maxima *= QuantumScale;
    cs[ch].minima *= QuantumScale;
    cs[ch].standard_deviation *= QuantumScale;
    cs[ch].kurtosis *= QuantumScale;
    cs[ch].skewness *= QuantumScale;
    cs[ch].entropy *= QuantumScale;
  }
  return cs;
}

/* Collects statistics for every image in the list into pfx->statistics
   and sets pfx->GotStats.  Returns MagickFalse on allocation failure.
*/
static MagickBooleanType CollectStatistics (FxInfo * pfx)
{
  Image * img = GetFirstImageInList (pfx->image);

  size_t imgNum=0;

  pfx->statistics = (ChannelStatistics**) AcquireMagickMemory (pfx->ImgListLen * sizeof (ChannelStatistics *));
  if (!pfx->statistics) {
    /* NOTE(review): "%lu" paired with size_t is only portable where
       size_t == unsigned long — confirm against project conventions. */
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), ResourceLimitFatalError,
      "Statistics", "%lu",
      pfx->ImgListLen);
    return MagickFalse;
  }

  for (;;) {
    pfx->statistics[imgNum] = CollectOneImgStats (pfx, img);

    if (++imgNum == pfx->ImgListLen) break;
    img = GetNextImageInList (img);
    assert (img != (Image *) NULL);
  }
  pfx->GotStats = MagickTrue;

  return MagickTrue;
}

/* Pushes val onto the run-time value stack; raises an exception and
   returns MagickFalse on overflow.  addr is reported for diagnostics.
*/
static MagickBooleanType inline PushVal (FxInfo * pfx, fxRtT * pfxrt, fxFltType val, int addr)
{
  if (pfxrt->usedValStack >=pfxrt->numValStack) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "ValStack overflow at addr=", "%i",
      addr);
    return MagickFalse;
  }

  pfxrt->ValStack[pfxrt->usedValStack++] = val;
  return MagickTrue;
}

/* Pops the top of the run-time value stack; raises an exception and
   returns 0 on underflow.  addr is reported for diagnostics.
*/
static inline fxFltType PopVal (FxInfo * pfx, fxRtT * pfxrt, int addr)
{
  if (pfxrt->usedValStack <= 0) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "ValStack underflow at addr=", "%i",
      addr);
    return (fxFltType) 0;
  }
  return pfxrt->ValStack[--pfxrt->usedValStack];
}

/* Returns image attribute ia of image ImgNum; statistics-based attributes
   read the given channel from cached (GotStats) or freshly collected stats.
*/
static inline fxFltType ImageStat (
  FxInfo * pfx, ssize_t ImgNum, PixelChannel channel, ImgAttrE ia)
{
  ChannelStatistics * cs = NULL;
  fxFltType ret = 0;
  MagickBooleanType NeedRelinq = MagickFalse;

  assert (channel >= 0 && channel <= MaxPixelChannels);

  if (pfx->GotStats) {
    cs = pfx->statistics[ImgNum];
  } else if (pfx->NeedStats) {
    /* If we need more than one statistic per pixel, this is inefficient. */
    cs = CollectOneImgStats (pfx, pfx->Images[ImgNum]);
    NeedRelinq = MagickTrue;
  }

  switch (ia) {
    case aDepth:
      ret = (fxFltType) GetImageDepth (pfx->Images[ImgNum], pfx->exception);
      break;
    case aExtent:
      /* NOTE(review): uses pfx->image rather than pfx->Images[ImgNum] —
         looks intentional only if extent is list-wide; confirm. */
      ret = (fxFltType) GetBlobSize (pfx->image);
      break;
    case aKurtosis:
      ret = cs[channel].kurtosis;
      break;
    case aMaxima:
      ret = cs[channel].maxima;
      break;
    case aMean:
      ret = cs[channel].mean;
      break;
    case aMedian:
      ret = cs[channel].median;
      break;
    case aMinima:
      ret = cs[channel].minima;
      break;
    case aPage:
      /* Do nothing */
      break;
    case aPageX:
      ret = (fxFltType) pfx->Images[ImgNum]->page.x;
      break;
    case aPageY:
      ret = (fxFltType) pfx->Images[ImgNum]->page.y;
      break;
    case aPageWid:
      ret = (fxFltType) pfx->Images[ImgNum]->page.width;
      break;
    case aPageHt:
      ret = (fxFltType) pfx->Images[ImgNum]->page.height;
      break;
    case aPrintsize:
      /* Do nothing */
      break;
    case aPrintsizeX:
      ret = (fxFltType) PerceptibleReciprocal (pfx->Images[ImgNum]->resolution.x)
                        * pfx->Images[ImgNum]->columns;
      break;
    case aPrintsizeY:
      ret = (fxFltType) PerceptibleReciprocal (pfx->Images[ImgNum]->resolution.y)
                        * pfx->Images[ImgNum]->rows;
      break;
    case aQuality:
      ret = (fxFltType) pfx->Images[ImgNum]->quality;
      break;
    case aRes:
      /* Do nothing */
      break;
    case aResX:
      ret = pfx->Images[ImgNum]->resolution.x;
      break;
    case aResY:
      ret = pfx->Images[ImgNum]->resolution.y;
      break;
    case aSkewness:
      ret = cs[channel].skewness;
      break;
    case aStdDev:
      ret = cs[channel].standard_deviation;
      break;
    case aH:
      ret = (fxFltType) pfx->Images[ImgNum]->rows;
      break;
    case aN:
      ret = (fxFltType) pfx->ImgListLen;
      break;
    case aT: /* image index in list */
      ret = (fxFltType) ImgNum;
      break;
    case aW:
      ret = (fxFltType) pfx->Images[ImgNum]->columns;
      break;
    case aZ:
      ret = (fxFltType) GetImageDepth (pfx->Images[ImgNum], pfx->exception);
      break;
    default:
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Unknown ia=", "%i",
        ia);
  }
  if (NeedRelinq) cs = (ChannelStatistics *)RelinquishMagickMemory (cs);

  return ret;
}
static fxFltType inline FxGcd (fxFltType x, fxFltType y, const size_t depth) { #define FxMaxFunctionDepth 200 if (x < y) return (FxGcd (y, x, depth+1)); if ((fabs((double) y) < 0.001) || (depth >= FxMaxFunctionDepth)) return (x); return (FxGcd (y, x-y*floor((double) (x/y)), depth+1)); } static ssize_t inline ChkImgNum (FxInfo * pfx, fxFltType f) /* Returns -1 if f is too large. */ { ssize_t i = (ssize_t) floor ((double) f + 0.5); if (i < 0) i += pfx->ImgListLen; if (i < 0 || i >= (ssize_t)pfx->ImgListLen) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "ImgNum", "%lu bad for ImgListLen %lu", i, pfx->ImgListLen); i = -1; } return i; } #define WHICH_ATTR_CHAN \ (pel->ChannelQual == NO_CHAN_QUAL) ? CompositePixelChannel : \ (pel->ChannelQual == THIS_CHANNEL) ? channel : pel->ChannelQual #define WHICH_NON_ATTR_CHAN \ (pel->ChannelQual == NO_CHAN_QUAL || \ pel->ChannelQual == THIS_CHANNEL || \ pel->ChannelQual == CompositePixelChannel \ ) ? (channel == CompositePixelChannel ? 
RedPixelChannel: channel) \ : pel->ChannelQual static fxFltType GetHslFlt (FxInfo * pfx, ssize_t ImgNum, const fxFltType fx, const fxFltType fy, int channel) { Image * img = pfx->Images[ImgNum]; double red, green, blue; double hue=0, saturation=0, lightness=0; MagickBooleanType okay = MagickTrue; if(!InterpolatePixelChannel (img, pfx->Imgs[ImgNum].View, RedPixelChannel, img->interpolate, (double) fx, (double) fy, &red, pfx->exception)) okay = MagickFalse; if(!InterpolatePixelChannel (img, pfx->Imgs[ImgNum].View, GreenPixelChannel, img->interpolate, (double) fx, (double) fy, &green, pfx->exception)) okay = MagickFalse; if(!InterpolatePixelChannel (img, pfx->Imgs[ImgNum].View, BluePixelChannel, img->interpolate, (double) fx, (double) fy, &blue, pfx->exception)) okay = MagickFalse; if (!okay) (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "GetHslFlt failure", "%lu %Lg,%Lg %i", ImgNum, fx, fy, channel); ConvertRGBToHSL ( red, green, blue, &hue, &saturation, &lightness); if (channel == HUE_CHANNEL) return hue; if (channel == SAT_CHANNEL) return saturation; if (channel == LIGHT_CHANNEL) return lightness; return 0.0; } static fxFltType GetHslInt (FxInfo * pfx, ssize_t ImgNum, const ssize_t imgx, const ssize_t imgy, int channel) { Image * img = pfx->Images[ImgNum]; double hue=0, saturation=0, lightness=0; const Quantum * p = GetCacheViewVirtualPixels (pfx->Imgs[ImgNum].View, imgx, imgy, 1, 1, pfx->exception); if (!p) (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "GetHslInt failure", "%lu %li,%li %i", ImgNum, imgx, imgy, channel); ConvertRGBToHSL ( GetPixelRed (img, p), GetPixelGreen (img, p), GetPixelBlue (img, p), &hue, &saturation, &lightness); if (channel == HUE_CHANNEL) return hue; if (channel == SAT_CHANNEL) return saturation; if (channel == LIGHT_CHANNEL) return lightness; return 0.0; } static fxFltType inline GetIntensity (FxInfo * pfx, ssize_t ImgNum, const fxFltType fx, const fxFltType fy) { Quantum 
quantum_pixel[MaxPixelChannels]; PixelInfo pixelinf; Image * img = pfx->Images[ImgNum]; (void) GetPixelInfo (img, &pixelinf); if (!InterpolatePixelInfo (img, pfx->Imgs[pfx->ImgNum].View, img->interpolate, (double) fx, (double) fy, &pixelinf, pfx->exception)) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "GetIntensity failure", "%lu %Lg,%Lg", ImgNum, fx, fy); } SetPixelViaPixelInfo (img, &pixelinf, quantum_pixel); return QuantumScale * GetPixelIntensity (img, quantum_pixel); } static MagickBooleanType ExecuteRPN (FxInfo * pfx, fxRtT * pfxrt, fxFltType *result, const PixelChannel channel, const ssize_t imgx, const ssize_t imgy) { const Quantum * p = pfxrt->thisPixel; fxFltType regA=0, regB=0, regC=0, regD=0, regE=0; Image * img = pfx->image; ChannelStatistics * cs = NULL; MagickBooleanType NeedRelinq = MagickFalse; double hue=0, saturation=0, lightness=0; int i; /* For -fx, this sets p to ImgNum 0. for %[fx:...], this sets p to the currrent image. Similarly img. */ if (!p) p = GetCacheViewVirtualPixels ( pfx->Imgs[pfx->ImgNum].View, imgx, imgy, 1, 1, pfx->exception); if (pfx->GotStats) { cs = pfx->statistics[pfx->ImgNum]; } else if (pfx->NeedStats) { cs = CollectOneImgStats (pfx, pfx->Images[pfx->ImgNum]); NeedRelinq = MagickTrue; } /* Folllowing is only for expressions like "saturation", with no image specifier. 
*/ if (pfx->NeedHsl) { ConvertRGBToHSL ( GetPixelRed (img, p), GetPixelGreen (img, p), GetPixelBlue (img, p), &hue, &saturation, &lightness); } for (i=0; i < pfx->usedElements; i++) { ElementT *pel = &pfx->Elements[i]; switch (pel->nArgs) { case 0: break; case 1: regA = PopVal (pfx, pfxrt, i); break; case 2: regB = PopVal (pfx, pfxrt, i); regA = PopVal (pfx, pfxrt, i); break; case 3: regC = PopVal (pfx, pfxrt, i); regB = PopVal (pfx, pfxrt, i); regA = PopVal (pfx, pfxrt, i); break; case 4: regD = PopVal (pfx, pfxrt, i); regC = PopVal (pfx, pfxrt, i); regB = PopVal (pfx, pfxrt, i); regA = PopVal (pfx, pfxrt, i); break; case 5: regE = PopVal (pfx, pfxrt, i); regD = PopVal (pfx, pfxrt, i); regC = PopVal (pfx, pfxrt, i); regB = PopVal (pfx, pfxrt, i); regA = PopVal (pfx, pfxrt, i); break; default: (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Too many args:", "%i", pel->nArgs); break; } switch (pel->oprNum) { case oAddEq: regA = (pfxrt->UserSymVals[pel->EleNdx] += regA); break; case oSubtractEq: regA = (pfxrt->UserSymVals[pel->EleNdx] -= regA); break; case oMultiplyEq: regA = (pfxrt->UserSymVals[pel->EleNdx] *= regA); break; case oDivideEq: regA = (pfxrt->UserSymVals[pel->EleNdx] *= PerceptibleReciprocal((double)regA)); break; case oPlusPlus: regA = pfxrt->UserSymVals[pel->EleNdx]++; break; case oSubSub: regA = pfxrt->UserSymVals[pel->EleNdx]--; break; case oAdd: regA += regB; break; case oSubtract: regA -= regB; break; case oMultiply: regA *= regB; break; case oDivide: regA *= PerceptibleReciprocal((double)regB); break; case oModulus: regA = fmod ((double) regA, fabs(floor((double) regB+0.5))); break; case oUnaryPlus: /* Do nothing. 
*/ break; case oUnaryMinus: regA = -regA; break; case oLshift: if ((size_t) (regB+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "undefined shift", "%g", (double) regB); regA = (fxFltType) 0.0; break; } regA = (fxFltType) ((size_t)(regA+0.5) << (size_t)(regB+0.5)); break; case oRshift: if ((size_t) (regB+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "undefined shift", "%g", (double) regB); regA = (fxFltType) 0.0; break; } regA = (fxFltType) ((size_t)(regA+0.5) >> (size_t)(regB+0.5)); break; case oEq: regA = fabs((double) (regA-regB)) < MagickEpsilon ? 1.0 : 0.0; break; case oNotEq: regA = fabs((double) (regA-regB)) >= MagickEpsilon ? 1.0 : 0.0; break; case oLtEq: regA = (regA <= regB) ? 1.0 : 0.0; break; case oGtEq: regA = (regA >= regB) ? 1.0 : 0.0; break; case oLt: regA = (regA < regB) ? 1.0 : 0.0; break; case oGt: regA = (regA > regB) ? 1.0 : 0.0; break; case oLogAnd: regA = (regA<=0) ? 0.0 : (regB > 0) ? 1.0 : 0.0; break; case oLogOr: regA = (regA>0) ? 1.0 : (regB > 0.0) ? 1.0 : 0.0; break; case oLogNot: regA = (regA==0) ? 1.0 : 0.0; break; case oBitAnd: regA = (fxFltType) ((size_t)(regA+0.5) & (size_t)(regB+0.5)); break; case oBitOr: regA = (fxFltType) ((size_t)(regA+0.5) | (size_t)(regB+0.5)); break; case oBitNot: /* Old fx doesn't add 0.5. 
*/ regA = (fxFltType) (~(size_t)(regA+0.5)); break; case oPow: regA = pow ((double) regA, (double) regB); break; case oQuery: case oColon: break; case oOpenParen: case oCloseParen: case oOpenBracket: case oCloseBracket: case oOpenBrace: case oCloseBrace: break; case oAssign: pel->val = regA; break; case oNull: { if (pel->type == etColourConstant) { switch (channel) { default: case 0: regA = pel->val; break; case 1: regA = pel->val1; break; case 2: regA = pel->val2; break; } } else { regA = pel->val; } break; } case fAbs: regA = fabs ((double) regA); break; #if defined(MAGICKCORE_HAVE_ACOSH) case fAcosh: regA = acosh ((double) regA); break; #endif case fAcos: regA = acos ((double) regA); break; #if defined(MAGICKCORE_HAVE_J1) case fAiry: if (regA==0) regA = 1.0; else { fxFltType gamma = 2.0 * j1 ((MagickPI*regA)) / (MagickPI*regA); regA = gamma * gamma; } break; #endif case fAlt: regA = (fxFltType) (((ssize_t) regA) & 0x01 ? -1.0 : 1.0); break; #if defined(MAGICKCORE_HAVE_ASINH) case fAsinh: regA = asinh ((double) regA); break; #endif case fAsin: regA = asin ((double) regA); break; #if defined(MAGICKCORE_HAVE_ATANH) case fAtanh: regA = atanh ((double) regA); break; #endif case fAtan2: regA = atan2 ((double) regA, (double) regB); break; case fAtan: regA = atan ((double) regA); break; case fCeil: regA = ceil ((double) regA); break; case fChannel: switch (channel) { case 0: break; case 1: regA = regB; break; case 2: regA = regC; break; case 3: regA = regD; break; case 4: regA = regE; break; default: regA = 0.0; } break; case fClamp: if (regA < 0) regA = 0.0; else if (regA > 1.0) regA = 1.0; break; case fCosh: regA = cosh ((double) regA); break; case fCos: regA = cos ((double) regA); break; case fDebug: /* FIXME: debug() should give channel name. 
*/ (void) fprintf (stderr, "%s[%g,%g].[%i]: %s=%.*Lg\n", img->filename, (double) imgx, (double) imgy, channel, SetPtrShortExp (pfx, pel->pExpStart, (size_t) (pel->lenExp+1)), pfx->precision, regA); break; case fDrc: regA = regA / (regB*(regA-1.0) + 1.0); break; #if defined(MAGICKCORE_HAVE_ERF) case fErf: regA = erf ((double) regA); break; #endif case fExp: regA = exp ((double) regA); break; case fFloor: regA = floor ((double) regA); break; case fGauss: regA = exp((double) (-regA*regA/2.0))/sqrt(2.0*MagickPI); break; case fGcd: if (!IsNaN(regA)) regA = FxGcd (regA, regB, 0); break; case fHypot: regA = hypot ((double) regA, (double) regB); break; case fInt: regA = floor ((double) regA); break; case fIsnan: regA = (fxFltType) (!!IsNaN (regA)); break; #if defined(MAGICKCORE_HAVE_J0) case fJ0: regA = j0 ((double) regA); break; #endif #if defined(MAGICKCORE_HAVE_J1) case fJ1: regA = j1 ((double) regA); break; #endif #if defined(MAGICKCORE_HAVE_J1) case fJinc: if (regA==0) regA = 1.0; else regA = 2.0 * j1 ((MagickPI*regA))/(MagickPI*regA); break; #endif case fLn: regA = log ((double) regA); break; case fLogtwo: regA = log10((double) regA) / log10(2.0); break; case fLog: regA = log10 ((double) regA); break; case fMax: regA = (regA > regB) ? regA : regB; break; case fMin: regA = (regA < regB) ? regA : regB; break; case fMod: regA = regA - floor((double) (regA*PerceptibleReciprocal((double) regB)))*regB; break; case fNot: regA = (fxFltType) (regA < MagickEpsilon); break; case fPow: regA = pow ((double) regA, (double) regB); break; case fRand: { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ExecuteRPN) #endif regA = GetPseudoRandomValue (pfxrt->random_info); break; } case fRound: regA = floor ((double) regA + 0.5); break; case fSign: regA = (regA < 0) ? 
-1.0 : 1.0; break; case fSinc: regA = sin ((double) (MagickPI*regA)) / (MagickPI*regA); break; case fSinh: regA = sinh ((double) regA); break; case fSin: regA = sin ((double) regA); break; case fSqrt: regA = sqrt ((double) regA); break; case fSquish: regA = 1.0 / (1.0 + exp ((double) -regA)); break; case fTanh: regA = tanh ((double) regA); break; case fTan: regA = tan ((double) regA); break; case fTrunc: if (regA >= 0) regA = floor ((double) regA); else regA = ceil ((double) regA); break; case fDo: case fFor: case fIf: case fWhile: break; case fU: { /* Note: 1 value is available, index into image list. May have ImgAttr qualifier or channel qualifier or both. */ ssize_t ImgNum = ChkImgNum (pfx, regA); if (ImgNum < 0) break; regA = (fxFltType) 0; if (ImgNum == 0) { Image * pimg = pfx->Images[0]; int pech = (int)pel->ChannelQual; if (pel->ImgAttrQual == aNull) { if (pech < 0) { if (pech == NO_CHAN_QUAL || pech == THIS_CHANNEL) { if (pfx->ImgNum==0) { regA = QuantumScale * p[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset]; } else { const Quantum * pv = GetCacheViewVirtualPixels ( pfx->Imgs[0].View, imgx, imgy, 1,1, pfx->exception); if (!pv) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "fU can't get cache", "%lu", ImgNum); break; } regA = QuantumScale * pv[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset]; } } else if (pech == HUE_CHANNEL || pech == SAT_CHANNEL || pech == LIGHT_CHANNEL) { regA = GetHslInt (pfx, ImgNum, imgx, imgy, pech); break; } else if (pech == INTENSITY_CHANNEL) { regA = GetIntensity (pfx, 0, (double) imgx, (double) imgy); break; } } else { if (pfx->ImgNum==0) { regA = QuantumScale * p[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset]; } else { const Quantum * pv = GetCacheViewVirtualPixels ( pfx->Imgs[0].View, imgx, imgy, 1,1, pfx->exception); if (!pv) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "fU can't get cache", "%lu", ImgNum); break; } regA = QuantumScale * 
pv[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset]; } } } else { /* we have an image atttribute */ regA = ImageStat (pfx, 0, WHICH_ATTR_CHAN, pel->ImgAttrQual); } } else { /* We have non-zero ImgNum. */ if (pel->ImgAttrQual == aNull) { const Quantum * pv; if ((int)pel->ChannelQual < 0) { if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL || pel->ChannelQual == LIGHT_CHANNEL) { regA = GetHslInt (pfx, ImgNum, imgx, imgy, pel->ChannelQual); break; } else if (pel->ChannelQual == INTENSITY_CHANNEL) { regA = GetIntensity (pfx, ImgNum, (fxFltType) imgx, (fxFltType) imgy); break; } } pv = GetCacheViewVirtualPixels ( pfx->Imgs[ImgNum].View, imgx, imgy, 1,1, pfx->exception); if (!pv) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "fU can't get cache", "%lu", ImgNum); break; } regA = QuantumScale * pv[pfx->Images[ImgNum]->channel_map[WHICH_NON_ATTR_CHAN].offset]; } else { regA = ImageStat (pfx, ImgNum, WHICH_ATTR_CHAN, pel->ImgAttrQual); } } break; } case fU0: { /* No args. No image attribute. We may have a ChannelQual. If called from %[fx:...], ChannelQual will be CompositePixelChannel. 
*/ Image * pimg = pfx->Images[0]; int pech = (int)pel->ChannelQual; if (pech < 0) { if (pech == NO_CHAN_QUAL || pech == THIS_CHANNEL) { if (pfx->ImgNum==0) { regA = QuantumScale * p[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset]; } else { const Quantum * pv = GetCacheViewVirtualPixels ( pfx->Imgs[0].View, imgx, imgy, 1,1, pfx->exception); if (!pv) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "fU0 can't get cache", "%i", 0); break; } regA = QuantumScale * pv[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset]; } } else if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL || pel->ChannelQual == LIGHT_CHANNEL) { regA = GetHslInt (pfx, 0, imgx, imgy, pel->ChannelQual); break; } else if (pel->ChannelQual == INTENSITY_CHANNEL) { regA = GetIntensity (pfx, 0, (fxFltType) imgx, (fxFltType) imgy); } } else { if (pfx->ImgNum==0) { regA = QuantumScale * p[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset]; } else { const Quantum * pv = GetCacheViewVirtualPixels ( pfx->Imgs[0].View, imgx, imgy, 1,1, pfx->exception); if (!pv) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "fU0 can't get cache", "%i", 0); break; } regA = QuantumScale * pv[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset]; } } break; } case fUP: { /* 3 args are: ImgNum, x, y */ ssize_t ImgNum = ChkImgNum (pfx, regA); fxFltType fx, fy; if (ImgNum < 0) break; if (pel->IsRelative) { fx = imgx + regB; fy = imgy + regC; } else { fx = regB; fy = regC; } if ((int)pel->ChannelQual < 0) { if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL || pel->ChannelQual == LIGHT_CHANNEL) { regA = GetHslFlt (pfx, ImgNum, fx, fy, pel->ChannelQual); break; } else if (pel->ChannelQual == INTENSITY_CHANNEL) { regA = GetIntensity (pfx, ImgNum, fx, fy); break; } } { double v; Image * imUP = pfx->Images[ImgNum]; if (! 
InterpolatePixelChannel (imUP, pfx->Imgs[ImgNum].View, WHICH_NON_ATTR_CHAN, imUP->interpolate, (double) fx, (double) fy, &v, pfx->exception)) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "fUP can't get interpolate", "%lu", ImgNum); break; } regA = v * QuantumScale; } break; } case fS: case fV: { /* No args. */ ssize_t ImgNum = 1; if (pel->oprNum == fS) ImgNum = pfx->ImgNum; if (pel->ImgAttrQual == aNull) { const Quantum * pv = GetCacheViewVirtualPixels ( pfx->Imgs[ImgNum].View, imgx, imgy, 1,1, pfx->exception); if (!pv) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "fV can't get cache", "%lu", ImgNum); break; } if ((int)pel->ChannelQual < 0) { if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL || pel->ChannelQual == LIGHT_CHANNEL) { regA = GetHslInt (pfx, ImgNum, imgx, imgy, pel->ChannelQual); break; } else if (pel->ChannelQual == INTENSITY_CHANNEL) { regA = GetIntensity (pfx, ImgNum, (double) imgx, (double) imgy); break; } } regA = QuantumScale * pv[pfx->Images[ImgNum]->channel_map[WHICH_NON_ATTR_CHAN].offset]; } else { regA = ImageStat (pfx, ImgNum, WHICH_ATTR_CHAN, pel->ImgAttrQual); } break; } case fP: case fSP: case fVP: { /* 2 args are: x, y */ fxFltType fx, fy; ssize_t ImgNum = pfx->ImgNum; if (pel->oprNum == fVP) ImgNum = 1; if (pel->IsRelative) { fx = imgx + regA; fy = imgy + regB; } else { fx = regA; fy = regB; } if ((int)pel->ChannelQual < 0) { if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL || pel->ChannelQual == LIGHT_CHANNEL) { regA = GetHslFlt (pfx, ImgNum, fx, fy, pel->ChannelQual); break; } else if (pel->ChannelQual == INTENSITY_CHANNEL) { regA = GetIntensity (pfx, ImgNum, fx, fy); } } { double v; if (! 
InterpolatePixelChannel (pfx->Images[ImgNum], pfx->Imgs[ImgNum].View, WHICH_NON_ATTR_CHAN, pfx->Images[ImgNum]->interpolate, (double) fx, (double) fy, &v, pfx->exception) ) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "fSP or fVP can't get interp", "%lu", ImgNum); break; } regA = v * (fxFltType)QuantumScale; } break; } case fNull: break; case aDepth: regA = (fxFltType) GetImageDepth (img, pfx->exception); break; case aExtent: regA = (fxFltType) img->extent; break; case aKurtosis: regA = cs[WHICH_ATTR_CHAN].kurtosis; break; case aMaxima: regA = cs[WHICH_ATTR_CHAN].maxima; break; case aMean: regA = cs[WHICH_ATTR_CHAN].mean; break; case aMedian: regA = cs[WHICH_ATTR_CHAN].median; break; case aMinima: regA = cs[WHICH_ATTR_CHAN].minima; break; case aPage: break; case aPageX: regA = (fxFltType) img->page.x; break; case aPageY: regA = (fxFltType) img->page.y; break; case aPageWid: regA = (fxFltType) img->page.width; break; case aPageHt: regA = (fxFltType) img->page.height; break; case aPrintsize: break; case aPrintsizeX: regA = (fxFltType) PerceptibleReciprocal (img->resolution.x) * img->columns; break; case aPrintsizeY: regA = (fxFltType) PerceptibleReciprocal (img->resolution.y) * img->rows; break; case aQuality: regA = (fxFltType) img->quality; break; case aRes: break; case aResX: regA = (fxFltType) img->resolution.x; break; case aResY: regA = (fxFltType) img->resolution.y; break; case aSkewness: regA = cs[WHICH_ATTR_CHAN].skewness; break; case aStdDev: regA = cs[WHICH_ATTR_CHAN].standard_deviation; break; case aH: /* image->rows */ regA = (fxFltType) img->rows; break; case aN: /* image list length */ regA = (fxFltType) pfx->ImgListLen; break; case aT: /* image index in list */ regA = (fxFltType) pfx->ImgNum; break; case aW: /* image->columns */ regA = (fxFltType) img->columns; break; case aZ: /* image depth */ regA = (fxFltType) GetImageDepth (img, pfx->exception); break; case aNull: break; case sHue: /* of conversion to HSL */ regA 
= hue; break; case sIntensity: regA = GetIntensity (pfx, pfx->ImgNum, (double) imgx, (double) imgy); break; case sLightness: /* of conversion to HSL */ regA = lightness; break; case sLuma: /* calculation */ case sLuminance: /* as Luma */ regA = QuantumScale * (0.212656 * GetPixelRed (img,p) + 0.715158 * GetPixelGreen (img,p) + 0.072186 * GetPixelBlue (img,p)); break; case sSaturation: /* from conversion to HSL */ regA = saturation; break; case sA: /* alpha */ regA = QuantumScale * GetPixelAlpha (img, p); break; case sB: /* blue */ regA = QuantumScale * GetPixelBlue (img, p); break; case sC: /* red (ie cyan) */ regA = QuantumScale * GetPixelCyan (img, p); break; case sG: /* green */ regA = QuantumScale * GetPixelGreen (img, p); break; case sI: /* current x-coordinate */ regA = (fxFltType) imgx; break; case sJ: /* current y-coordinate */ regA = (fxFltType) imgy; break; case sK: /* black of CMYK */ regA = QuantumScale * GetPixelBlack (img, p); break; case sM: /* green (ie magenta) */ regA = QuantumScale * GetPixelGreen (img, p); break; case sO: /* alpha */ regA = QuantumScale * GetPixelAlpha (img, p); break; case sR: regA = QuantumScale * GetPixelRed (img, p); break; case sY: regA = QuantumScale * GetPixelYellow (img, p); break; case sNull: break; case rGoto: i = pel->EleNdx-1; /* -1 because 'for' loop will increment. 
*/ break; case rIfZeroGoto: if (fabs((double) regA) < MagickEpsilon) i = pel->EleNdx-1; break; case rIfNotZeroGoto: if (fabs((double) regA) > MagickEpsilon) i = pel->EleNdx-1; break; case rCopyFrom: regA = pfxrt->UserSymVals[pel->EleNdx]; break; case rCopyTo: pfxrt->UserSymVals[pel->EleNdx] = regA; break; case rZerStk: pfxrt->usedValStack = 0; break; case rNull: break; default: (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "pel->oprNum", "%i '%s' not yet implemented", (int)pel->oprNum, OprStr(pel->oprNum)); break; } if (i < 0) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Bad run-time address", "%i", i); } if (pel->DoPush) if (!PushVal (pfx, pfxrt, regA, i)) break; } if (pfxrt->usedValStack > 0) regA = PopVal (pfx, pfxrt, 9999); *result = regA; if (NeedRelinq) cs = (ChannelStatistics *)RelinquishMagickMemory (cs); if (pfx->exception->severity != UndefinedException) { return MagickFalse; } if (pfxrt->usedValStack != 0) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "ValStack not empty", "(%i)", pfxrt->usedValStack); return MagickFalse; } return MagickTrue; } /* Following is substitute for FxEvaluateChannelExpression(). 
*/ MagickPrivate MagickBooleanType FxEvaluateChannelExpression ( FxInfo *pfx, const PixelChannel channel, const ssize_t x, const ssize_t y, double *result, ExceptionInfo *exception) { const int id = GetOpenMPThreadId(); fxFltType ret; assert (pfx != NULL); assert (pfx->image != NULL); assert (pfx->Images != NULL); assert (pfx->Imgs != NULL); assert (pfx->fxrts != NULL); pfx->fxrts[id].thisPixel = NULL; if (!ExecuteRPN (pfx, &pfx->fxrts[id], &ret, channel, x, y)) { (void) ThrowMagickException ( exception, GetMagickModule(), OptionError, "ExcuteRPN failed", " "); return MagickFalse; } *result = (double) ret; return MagickTrue; } static FxInfo *AcquireFxInfoPrivate (const Image * images, const char * expression, MagickBooleanType CalcAllStats, ExceptionInfo *exception) { char chLimit; FxInfo * pfx = (FxInfo*) AcquireCriticalMemory (sizeof (*pfx)); memset (pfx, 0, sizeof (*pfx)); if (!InitFx (pfx, images, CalcAllStats, exception)) { pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } if (!BuildRPN (pfx)) { (void) DeInitFx (pfx); pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } if (*expression == '@') pfx->expression = FileToString (expression+1, ~0UL, exception); else pfx->expression = ConstantString (expression); pfx->pex = (char *)pfx->expression; pfx->teDepth = 0; if (!TranslateStatementList (pfx, ";", &chLimit)) { (void) DestroyRPN (pfx); pfx->expression = DestroyString (pfx->expression); pfx->pex = NULL; (void) DeInitFx (pfx); pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } if (pfx->teDepth) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "Translate expression depth", "(%i) not 0", pfx->teDepth); (void) DestroyRPN (pfx); pfx->expression = DestroyString (pfx->expression); pfx->pex = NULL; (void) DeInitFx (pfx); pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } if (chLimit != '\0' && chLimit != ';') { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), OptionError, "AcquireFxInfo: 
TranslateExpression did not exhaust input", "(chLimit=%i) at'%s'", (int)chLimit, pfx->pex); (void) DestroyRPN (pfx); pfx->expression = DestroyString (pfx->expression); pfx->pex = NULL; (void) DeInitFx (pfx); pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } if (pfx->NeedStats && pfx->runType == rtEntireImage && !pfx->statistics) { if (!CollectStatistics (pfx)) { (void) DestroyRPN (pfx); pfx->expression = DestroyString (pfx->expression); pfx->pex = NULL; (void) DeInitFx (pfx); pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } } if (pfx->DebugOpt) { DumpTables (stderr); DumpUserSymbols (pfx, stderr); (void) DumpRPN (pfx, stderr); } { size_t number_threads=(size_t) GetMagickResourceLimit(ThreadResource); ssize_t t; pfx->fxrts = (fxRtT *)AcquireQuantumMemory (number_threads, sizeof(fxRtT)); if (!pfx->fxrts) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), ResourceLimitFatalError, "fxrts", "%lu", number_threads); (void) DestroyRPN (pfx); pfx->expression = DestroyString (pfx->expression); pfx->pex = NULL; (void) DeInitFx (pfx); pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } for (t=0; t < (ssize_t) number_threads; t++) { if (!AllocFxRt (pfx, &pfx->fxrts[t])) { (void) ThrowMagickException ( pfx->exception, GetMagickModule(), ResourceLimitFatalError, "AllocFxRt t=", "%g", (double) t); { ssize_t t2; for (t2 = t-1; t2 >= 0; t2--) { DestroyFxRt (&pfx->fxrts[t]); } } pfx->fxrts = (fxRtT *) RelinquishMagickMemory (pfx->fxrts); (void) DestroyRPN (pfx); pfx->expression = DestroyString (pfx->expression); pfx->pex = NULL; (void) DeInitFx (pfx); pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } } } return pfx; } FxInfo *AcquireFxInfo (const Image * images, const char * expression, ExceptionInfo *exception) { return AcquireFxInfoPrivate (images, expression, MagickFalse, exception); } FxInfo *DestroyFxInfo (FxInfo * pfx) { ssize_t t; assert (pfx != NULL); assert (pfx->image != NULL); assert (pfx->Images != NULL); assert 
(pfx->Imgs != NULL); assert (pfx->fxrts != NULL); for (t=0; t < (ssize_t) GetMagickResourceLimit(ThreadResource); t++) { DestroyFxRt (&pfx->fxrts[t]); } pfx->fxrts = (fxRtT *) RelinquishMagickMemory (pfx->fxrts); DestroyRPN (pfx); pfx->expression = DestroyString (pfx->expression); pfx->pex = NULL; (void) DeInitFx (pfx); pfx = (FxInfo*) RelinquishMagickMemory(pfx); return NULL; } /* Following is substitute for FxImage(). */ MagickExport Image *FxImage (const Image *image, const char *expression, ExceptionInfo *exception) { #define FxImageTag "FxNew/Image" CacheView *fx_view, *image_view; Image *fx_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; FxInfo *pfx; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (expression == (const char *) NULL) return(CloneImage(image,0,0,MagickTrue,exception)); fx_image=CloneImage(image,0,0,MagickTrue,exception); if (!fx_image) return NULL; if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse) { fx_image=DestroyImage(fx_image); return NULL; } pfx = AcquireFxInfoPrivate (image, expression, MagickTrue, exception); if (!pfx) { fx_image=DestroyImage(fx_image); return NULL; } assert (pfx->image != NULL); assert (pfx->Images != NULL); assert (pfx->Imgs != NULL); assert (pfx->fxrts != NULL); status=MagickTrue; progress=0; image_view = AcquireVirtualCacheView (image, pfx->exception); fx_view = AcquireAuthenticCacheView (fx_image, pfx->exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(progress,status) \ magick_number_threads(image,fx_image,fx_image->rows, \ pfx->ContainsDebug ? 
0 : 1) #endif for (y=0; y < (ssize_t) fx_image->rows; y++) { const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; fxFltType result = 0.0; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels (image_view, 0, y, image->columns, 1, pfx->exception); q = QueueCacheViewAuthenticPixels (fx_view, 0, y, fx_image->columns, 1, pfx->exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) fx_image->columns; x++) { ssize_t i; pfx->fxrts[id].thisPixel = (Quantum *)p; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel (image, i); PixelTrait traits = GetPixelChannelTraits (image, channel); PixelTrait fx_traits = GetPixelChannelTraits (fx_image, channel); if ((traits == UndefinedPixelTrait) || (fx_traits == UndefinedPixelTrait)) continue; if ((fx_traits & CopyPixelTrait) != 0) { SetPixelChannel (fx_image, channel, p[i], q); continue; } if (!ExecuteRPN (pfx, &pfx->fxrts[id], &result, channel, x, y)) { status=MagickFalse; continue; } q[i] = ClampToQuantum ((MagickRealType) (QuantumRange*result)); } p+=GetPixelChannels (image); q+=GetPixelChannels (fx_image); } if (SyncCacheViewAuthenticPixels(fx_view, pfx->exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed = SetImageProgress (image, FxImageTag, progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } fx_view = DestroyCacheView (fx_view); image_view = DestroyCacheView (image_view); /* Before destroying the user symbol values, dump them to stderr. 
*/ if (pfx->DebugOpt && pfx->usedUserSymbols) { int t, i; char UserSym[MagickPathExtent]; fprintf (stderr, "User symbols (%i):\n", pfx->usedUserSymbols); for (t=0; t < (int) GetMagickResourceLimit(ThreadResource); t++) { for (i = 0; i < (int) pfx->usedUserSymbols; i++) { fprintf (stderr, "th=%i us=%i '%s': %.*Lg\n", t, i, NameOfUserSym (pfx, i, UserSym), pfx->precision, pfx->fxrts[t].UserSymVals[i]); } } } if (pfx->exception->severity != UndefinedException) { status = MagickFalse; } if (status == MagickFalse) fx_image = DestroyImage (fx_image); pfx = DestroyFxInfo (pfx); return(fx_image); }
parallel_bmm.c
#include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <omp.h> #include <time.h> #include "../inc/utils.h" #include "../inc/blocking.h" #include "../inc/bmm.h" #include "../inc/parallel_bmm.h" void blocked_bmm_shared(coo_format *C, blocked_csr *blk_A, csr_format *AA, blocked_csc *blk_B, csc_format *BB){ // dynamic memory allocation for submatrix C // We dont know how many non zero elements has each block has at first int t_n = 10000; int c_n = 0; int blk_ptr_A, blk_ptr_B; int blked_row, blked_col; coo2_format *sub_C; for(blked_row=0;blked_row<AA->N;blked_row ++){ #pragma omp parallel shared(blked_row, C,blk_A, AA, blk_B, BB) private(sub_C,blk_ptr_A,blk_ptr_B,blked_col) { #pragma omp for schedule(dynamic) nowait for(blked_col=0;blked_col<BB->M;blked_col++){ sub_C = (coo2_format *)malloc(sizeof(coo2_format)); sub_C->max_nz = t_n; sub_C->cur_nz = 0; sub_C->coo_I = (int *)malloc(sizeof(int)*sub_C->max_nz); sub_C->coo_J = (int *)malloc(sizeof(int)*sub_C->max_nz); blk_ptr_A = AA->csr_row[blked_row]; blk_ptr_B = BB->csc_col[blked_col]; while(blk_ptr_A < AA->csr_row[blked_row + 1] && blk_ptr_B < BB->csc_col[blked_col+1]){ if(AA->csr_col[blk_ptr_A] > BB->csc_row[blk_ptr_B]){ blk_ptr_B++; } else if(AA->csr_col[blk_ptr_A]<BB->csc_row[blk_ptr_B]){ blk_ptr_A++; } else{ submatrix_bmm(blk_A, blk_ptr_A, blk_B, blk_ptr_B, sub_C); blk_ptr_A++; blk_ptr_B++; } } #pragma omp critical(dataupdate) { insert_subcoo_2_coo(C, sub_C->coo_I, sub_C->coo_J,sub_C->cur_nz); } free(sub_C->coo_I); free(sub_C->coo_J); free(sub_C); } } } } void blocked_bmm_filtered_shared(coo_format *C, blocked_csr *blk_F,csr_format *FF,blocked_csr *blk_A, csr_format *AA, blocked_csc *blk_B, csc_format *BB){ int blk_ptr_A, blk_ptr_B; int blked_row, blked_col_ptr; coo2_format *sub_C; for(blked_row=0;blked_row<FF->N;blked_row++){ #pragma omp parallel shared(blked_row, C, blk_F, FF, blk_A, AA, blk_B, BB) private(sub_C, blk_ptr_A, blk_ptr_B, blked_col_ptr) { #pragma omp for schedule(dynamic) nowait 
for(blked_col_ptr=FF->csr_row[blked_row];blked_col_ptr<FF->csr_row[blked_row+1];blked_col_ptr++){ sub_C = (coo2_format *)malloc(sizeof(coo2_format)); sub_C->max_nz = blk_F->blk_nz[blked_col_ptr+1] - blk_F->blk_nz[blked_col_ptr]; sub_C->cur_nz = 0; sub_C->coo_I = (int *)malloc(sizeof(int) * sub_C->max_nz); sub_C->coo_J = (int *)malloc(sizeof(int) * sub_C->max_nz); blk_ptr_A = AA->csr_row[blked_row]; blk_ptr_B = BB->csc_col[FF->csr_col[blked_col_ptr]]; while(blk_ptr_A < AA->csr_row[blked_row + 1] && blk_ptr_B < BB->csc_col[FF->csr_col[blked_col_ptr]+1]){ if(AA->csr_col[blk_ptr_A] > BB->csc_row[blk_ptr_B]){ blk_ptr_B++; } else if(AA->csr_col[blk_ptr_A]<BB->csc_row[blk_ptr_B]){ blk_ptr_A++; } else{ submatrix_bmmfiltered(blk_F, blked_col_ptr, blk_A, blk_ptr_A, blk_B, blk_ptr_B, sub_C); blk_ptr_A++; blk_ptr_B++; } } #pragma omp critical(dataupdate_filtered) { insert_subcoo_2_coo(C, sub_C->coo_I, sub_C->coo_J,sub_C->cur_nz); } free(sub_C->coo_I); free(sub_C->coo_J); free(sub_C); } } } }
alloc_fail.c
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu // RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \ // RUN: | %fcheck-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu // RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-x86_64-pc-linux-gnu // RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \ // RUN: | %fcheck-x86_64-pc-linux-gnu // RUN: %libomptarget-compile-nvptx64-nvidia-cuda // RUN: %libomptarget-run-fail-nvptx64-nvidia-cuda 2>&1 \ // RUN: | %fcheck-nvptx64-nvidia-cuda // CHECK: Libomptarget message: explicit extension not allowed: host address specified is 0x{{.*}} (8 bytes), but device allocation maps to host at 0x{{.*}} (8 bytes) // CHECK: Libomptarget error: Call to getOrAllocTgtPtr returned null pointer (device failure or illegal mapping). // CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory // UNSUPPORTED: clang-11 int main() { int arr[4] = {0, 1, 2, 3}; #pragma omp target data map(alloc: arr[0:2]) #pragma omp target data map(alloc: arr[1:2]) ; return 0; }
CLHelper_fpga.h
//------------------------------------------ //--cambine:helper function for OpenCL //--programmer: Jianbin Fang //--date: 27/12/2010 //------------------------------------------ #ifndef _CL_HELPER_ #define _CL_HELPER_ #include <CL/cl.h> #include <vector> #include <iostream> #include <fstream> #include <string> using std::string; using std::ifstream; using std::cerr; using std::endl; using std::cout; //#pragma OPENCL EXTENSION cl_nv_compiler_options:enable #define WORK_DIM 2 //work-items dimensions void _clFinish() throw(string); struct oclHandleStruct { cl_context context; cl_device_id *devices; cl_command_queue queue; cl_program program; cl_int cl_status; std::string error_str; std::vector<cl_kernel> kernel; }; struct oclHandleStruct oclHandles; char kernel_file[100] = "Kernels.cl"; int total_kernels = 2; string kernel_names[2] = {"BFS_1", "BFS_2"}; int work_group_size = 512; int device_id_inused = 0; //deviced id used (default : 0) /* * Converts the contents of a file into a string */ string FileToString(const string fileName) { ifstream f(fileName.c_str(), ifstream::in | ifstream::binary); try { size_t size; char* str; string s; if(f.is_open()) { size_t fileSize; f.seekg(0, ifstream::end); size = fileSize = f.tellg(); f.seekg(0, ifstream::beg); str = new char[size+1]; if (!str) throw(string("Could not allocate memory")); f.read(str, fileSize); f.close(); str[size] = '\0'; s = str; delete [] str; return s; } } catch(std::string msg) { cerr << "Exception caught in FileToString(): " << msg << endl; if(f.is_open()) f.close(); } catch(...) 
{ cerr << "Exception caught in FileToString()" << endl; if(f.is_open()) f.close(); } string errorMsg = "FileToString()::Error: Unable to open file " + fileName; throw(errorMsg); } //--------------------------------------- //Read command line parameters // void _clCmdParams(int argc, char* argv[]){ for (int i =0; i < argc; ++i) { switch (argv[i][1]) { case 'g': //--g stands for size of work group if (++i < argc) { sscanf(argv[i], "%u", &work_group_size); } else { std::cerr << "Could not read argument after option " << argv[i-1] << std::endl; throw; } break; case 'd': //--d stands for device id used in computaion if (++i < argc) { sscanf(argv[i], "%u", &device_id_inused); } else { std::cerr << "Could not read argument after option " << argv[i-1] << std::endl; throw; } break; default: ; } } } //--------------------------------------- //Initlize CL objects //--description: there are 5 steps to initialize all the OpenCL objects needed //--revised on 04/01/2011: get the number of devices and // devices have no relationship with context void _clInit() { int DEVICE_ID_INUSED = device_id_inused; cl_int resultCL; oclHandles.context = NULL; oclHandles.devices = NULL; oclHandles.queue = NULL; oclHandles.program = NULL; cl_uint deviceListSize; //----------------------------------------------- //--cambine-1: find the available platforms and select one cl_uint numPlatforms; cl_platform_id targetPlatform = NULL; resultCL = clGetPlatformIDs(0, NULL, &numPlatforms); if (resultCL != CL_SUCCESS) throw (string("InitCL()::Error: Getting number of platforms (clGetPlatformIDs)")); //printf("number of platforms:%d\n",numPlatforms); //by cambine if (!(numPlatforms > 0)) throw (string("InitCL()::Error: No platforms found (clGetPlatformIDs)")); cl_platform_id* allPlatforms = (cl_platform_id*) malloc(numPlatforms * sizeof(cl_platform_id)); resultCL = clGetPlatformIDs(numPlatforms, allPlatforms, NULL); if (resultCL != CL_SUCCESS) throw (string("InitCL()::Error: Getting platform ids 
(clGetPlatformIDs)")); /* Select the 3rd platform (FPGA)*/ targetPlatform = allPlatforms[0]; //printf("Selected Platform = %s",targetPlatform); for (int i = 0; i < numPlatforms; i++) { char pbuff[128]; resultCL = clGetPlatformInfo( allPlatforms[i], CL_PLATFORM_VENDOR, sizeof(pbuff), pbuff, NULL); if (resultCL != CL_SUCCESS) throw (string("InitCL()::Error: Getting platform info (clGetPlatformInfo)")); //printf("vedor is %s\n",pbuff); } free(allPlatforms); //----------------------------------------------- //--cambine-2: create an OpenCL context cl_context_properties cprops[3] = { CL_CONTEXT_PLATFORM, (cl_context_properties)targetPlatform, 0 }; oclHandles.context = clCreateContextFromType(cprops, CL_DEVICE_TYPE_ALL, NULL, NULL, &resultCL); if ((resultCL != CL_SUCCESS) || (oclHandles.context == NULL)) throw (string("InitCL()::Error: Creating Context (clCreateContextFromType)")); //----------------------------------------------- //--cambine-3: detect OpenCL devices /* First, get the size of device list */ oclHandles.cl_status = clGetDeviceIDs(targetPlatform, CL_DEVICE_TYPE_ALL, 0, NULL, &deviceListSize); if(oclHandles.cl_status!=CL_SUCCESS){ throw(string("exception in _clInit -> clGetDeviceIDs")); } if (deviceListSize == 0) throw(string("InitCL()::Error: No devices found.")); //std::cout<<"device number:"<<deviceListSize<<std::endl; /* Now, allocate the device list */ oclHandles.devices = (cl_device_id *)malloc(deviceListSize * sizeof(cl_device_id)); if (oclHandles.devices == 0) throw(string("InitCL()::Error: Could not allocate memory.")); /* Next, get the device list data */ oclHandles.cl_status = clGetDeviceIDs(targetPlatform, CL_DEVICE_TYPE_ALL, deviceListSize, \ oclHandles.devices, NULL); if(oclHandles.cl_status!=CL_SUCCESS){ throw(string("exception in _clInit -> clGetDeviceIDs-2")); } //----------------------------------------------- //--cambine-4: Create an OpenCL command queue oclHandles.queue = clCreateCommandQueue(oclHandles.context, 
oclHandles.devices[DEVICE_ID_INUSED], 0, &resultCL); if ((resultCL != CL_SUCCESS) || (oclHandles.queue == NULL)) throw(string("InitCL()::Creating Command Queue. (clCreateCommandQueue)")); //----------------------------------------------- //--cambine-5: Load CL file, build CL program object, create CL kernel object /* std::string source_str = FileToString(kernel_file); const char * source = source_str.c_str(); size_t sourceSize[] = { source_str.length() }; oclHandles.program = clCreateProgramWithSource(oclHandles.context, 1, &source, sourceSize, &resultCL); if ((resultCL != CL_SUCCESS) || (oclHandles.program == NULL)) throw(string("InitCL()::Error: Loading Binary into cl_program. (clCreateProgramWithBinary)")); //insert debug information //std::string options= "-cl-nv-verbose"; //Doesn't work on AMD machines //options += " -cl-nv-opt-level=3"; resultCL = clBuildProgram(oclHandles.program, deviceListSize, oclHandles.devices, NULL, NULL,NULL); if ((resultCL != CL_SUCCESS) || (oclHandles.program == NULL)) { cerr << "InitCL()::Error: In clBuildProgram" << endl; size_t length; resultCL = clGetProgramBuildInfo(oclHandles.program, oclHandles.devices[DEVICE_ID_INUSED], CL_PROGRAM_BUILD_LOG, 0, NULL, &length); if(resultCL != CL_SUCCESS) throw(string("InitCL()::Error: Getting Program build info(clGetProgramBuildInfo)")); char* buffer = (char*)malloc(length); resultCL = clGetProgramBuildInfo(oclHandles.program, oclHandles.devices[DEVICE_ID_INUSED], CL_PROGRAM_BUILD_LOG, length, buffer, NULL); if(resultCL != CL_SUCCESS) throw(string("InitCL()::Error: Getting Program build info(clGetProgramBuildInfo)")); cerr << buffer << endl; free(buffer); throw(string("InitCL()::Error: Building Program (clBuildProgram)")); } */ //get program information in intermediate representation #ifdef PTX_MSG size_t binary_sizes[deviceListSize]; char * binaries[deviceListSize]; //figure out number of devices and the sizes of the binary for each device. 
oclHandles.cl_status = clGetProgramInfo(oclHandles.program, CL_PROGRAM_BINARY_SIZES, sizeof(size_t)*deviceListSize, &binary_sizes, NULL ); if(oclHandles.cl_status!=CL_SUCCESS){ throw(string("--cambine:exception in _InitCL -> clGetProgramInfo-2")); } std::cout<<"--cambine:"<<binary_sizes<<std::endl; //copy over all of the generated binaries. for(int i=0;i<deviceListSize;i++) binaries[i] = (char *)malloc( sizeof(char)*(binary_sizes[i]+1)); oclHandles.cl_status = clGetProgramInfo(oclHandles.program, CL_PROGRAM_BINARIES, sizeof(char *)*deviceListSize, binaries, NULL ); if(oclHandles.cl_status!=CL_SUCCESS){ throw(string("--cambine:exception in _InitCL -> clGetProgramInfo-3")); } for(int i=0;i<deviceListSize;i++) binaries[i][binary_sizes[i]] = '\0'; std::cout<<"--cambine:writing ptd information..."<<std::endl; FILE * ptx_file = fopen("cl.ptx","w"); if(ptx_file==NULL){ throw(string("exceptions in allocate ptx file.")); } fprintf(ptx_file,"%s",binaries[DEVICE_ID_INUSED]); fclose(ptx_file); std::cout<<"--cambine:writing ptd information done."<<std::endl; for(int i=0;i<deviceListSize;i++) free(binaries[i]); #endif /* for (int nKernel = 0; nKernel < total_kernels; nKernel++) { //get a kernel object handle for a kernel with the given name cl_kernel kernel = clCreateKernel(oclHandles.program, (kernel_names[nKernel]).c_str(), &resultCL); if ((resultCL != CL_SUCCESS) || (kernel == NULL)) { string errorMsg = "InitCL()::Error: Creating Kernel (clCreateKernel) \"" + kernel_names[nKernel] + "\""; throw(errorMsg); } oclHandles.kernel.push_back(kernel); } */ //get resource alocation information #ifdef RES_MSG char * build_log; size_t ret_val_size; oclHandles.cl_status = clGetProgramBuildInfo(oclHandles.program, oclHandles.devices[DEVICE_ID_INUSED], CL_PROGRAM_BUILD_LOG, 0, NULL, &ret_val_size); if(oclHandles.cl_status!=CL_SUCCESS){ throw(string("exceptions in _InitCL -> getting resource information")); } build_log = (char *)malloc(ret_val_size+1); oclHandles.cl_status = 
clGetProgramBuildInfo(oclHandles.program, oclHandles.devices[DEVICE_ID_INUSED], CL_PROGRAM_BUILD_LOG, ret_val_size, build_log, NULL); if(oclHandles.cl_status!=CL_SUCCESS){ throw(string("exceptions in _InitCL -> getting resources allocation information-2")); } build_log[ret_val_size] = '\0'; std::cout<<"--cambine:"<<build_log<<std::endl; free(build_log); #endif } //--------------------------------------- //release CL objects void _clRelease() { char errorFlag = false; for (int nKernel = 0; nKernel < oclHandles.kernel.size(); nKernel++) { if (oclHandles.kernel[nKernel] != NULL) { cl_int resultCL = clReleaseKernel(oclHandles.kernel[nKernel]); if (resultCL != CL_SUCCESS) { cerr << "ReleaseCL()::Error: In clReleaseKernel" << endl; errorFlag = true; } oclHandles.kernel[nKernel] = NULL; } oclHandles.kernel.clear(); } if (oclHandles.program != NULL) { cl_int resultCL = clReleaseProgram(oclHandles.program); if (resultCL != CL_SUCCESS) { cerr << "ReleaseCL()::Error: In clReleaseProgram" << endl; errorFlag = true; } oclHandles.program = NULL; } if (oclHandles.queue != NULL) { cl_int resultCL = clReleaseCommandQueue(oclHandles.queue); if (resultCL != CL_SUCCESS) { cerr << "ReleaseCL()::Error: In clReleaseCommandQueue" << endl; errorFlag = true; } oclHandles.queue = NULL; } free(oclHandles.devices); if (oclHandles.context != NULL) { cl_int resultCL = clReleaseContext(oclHandles.context); if (resultCL != CL_SUCCESS) { cerr << "ReleaseCL()::Error: In clReleaseContext" << endl; errorFlag = true; } oclHandles.context = NULL; } if (errorFlag) throw(string("ReleaseCL()::Error encountered.")); } //-------------------------------------------------------- //--cambine:create buffer and then copy data from host to device cl_mem _clCreateAndCpyMem(int size, void * h_mem_source) throw(string){ cl_mem d_mem; d_mem = clCreateBuffer(oclHandles.context, CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR, \ size, h_mem_source, &oclHandles.cl_status); #ifdef ERRMSG if(oclHandles.cl_status != CL_SUCCESS) 
throw(string("excpetion in _clCreateAndCpyMem()")); #endif return d_mem; } //------------------------------------------------------- //--cambine: create read only buffer for devices //--date: 17/01/2011 cl_mem _clMallocRW(int size, void * h_mem_ptr) throw(string){ cl_mem d_mem; d_mem = clCreateBuffer(oclHandles.context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR, size, h_mem_ptr, &oclHandles.cl_status); #ifdef ERRMSG if(oclHandles.cl_status != CL_SUCCESS) throw(string("excpetion in _clMallocRW")); #endif return d_mem; } //------------------------------------------------------- //--cambine: create read and write buffer for devices //--date: 17/01/2011 cl_mem _clMalloc(int size, void * h_mem_ptr) throw(string){ cl_mem d_mem; d_mem = clCreateBuffer(oclHandles.context, CL_MEM_WRITE_ONLY | CL_MEM_COPY_HOST_PTR, size, h_mem_ptr, &oclHandles.cl_status); #ifdef ERRMSG if(oclHandles.cl_status != CL_SUCCESS) throw(string("excpetion in _clMalloc")); #endif return d_mem; } //------------------------------------------------------- //--cambine: transfer data from host to device //--date: 17/01/2011 void _clMemcpyH2D(cl_mem d_mem, int size, const void *h_mem_ptr) throw(string){ oclHandles.cl_status = clEnqueueWriteBuffer(oclHandles.queue, d_mem, CL_TRUE, 0, size, h_mem_ptr, 0, NULL, NULL); #ifdef ERRMSG if(oclHandles.cl_status != CL_SUCCESS) throw(string("excpetion in _clMemcpyH2D")); #endif } //-------------------------------------------------------- //--cambine:create buffer and then copy data from host to device with pinned // memory cl_mem _clCreateAndCpyPinnedMem(int size, float* h_mem_source) throw(string){ cl_mem d_mem, d_mem_pinned; float * h_mem_pinned = NULL; d_mem_pinned = clCreateBuffer(oclHandles.context, CL_MEM_READ_ONLY|CL_MEM_ALLOC_HOST_PTR, \ size, NULL, &oclHandles.cl_status); #ifdef ERRMSG if(oclHandles.cl_status != CL_SUCCESS) throw(string("excpetion in _clCreateAndCpyMem()->d_mem_pinned")); #endif //------------ d_mem = clCreateBuffer(oclHandles.context, 
CL_MEM_READ_ONLY, \ size, NULL, &oclHandles.cl_status); #ifdef ERRMSG if(oclHandles.cl_status != CL_SUCCESS) throw(string("excpetion in _clCreateAndCpyMem() -> d_mem ")); #endif //---------- h_mem_pinned = (cl_float *)clEnqueueMapBuffer(oclHandles.queue, d_mem_pinned, CL_TRUE, \ CL_MAP_WRITE, 0, size, 0, NULL, \ NULL, &oclHandles.cl_status); #ifdef ERRMSG if(oclHandles.cl_status != CL_SUCCESS) throw(string("excpetion in _clCreateAndCpyMem() -> clEnqueueMapBuffer")); #endif int element_number = size/sizeof(float); #pragma omp parallel for for(int i=0;i<element_number;i++){ h_mem_pinned[i] = h_mem_source[i]; } //---------- oclHandles.cl_status = clEnqueueWriteBuffer(oclHandles.queue, d_mem, \ CL_TRUE, 0, size, h_mem_pinned, \ 0, NULL, NULL); #ifdef ERRMSG if(oclHandles.cl_status != CL_SUCCESS) throw(string("excpetion in _clCreateAndCpyMem() -> clEnqueueWriteBuffer")); #endif return d_mem; } //-------------------------------------------------------- //--cambine:create write only buffer on device cl_mem _clMallocWO(int size) throw(string){ cl_mem d_mem; d_mem = clCreateBuffer(oclHandles.context, CL_MEM_WRITE_ONLY, size, 0, &oclHandles.cl_status); #ifdef ERRMSG if(oclHandles.cl_status != CL_SUCCESS) throw(string("excpetion in _clCreateMem()")); #endif return d_mem; } //-------------------------------------------------------- //transfer data from device to host void _clMemcpyD2H(cl_mem d_mem, int size, void * h_mem) throw(string){ oclHandles.cl_status = clEnqueueReadBuffer(oclHandles.queue, d_mem, CL_TRUE, 0, size, h_mem, 0,0,0); _clFinish(); #ifdef ERRMSG oclHandles.error_str = "excpetion in _clCpyMemD2H -> "; switch(oclHandles.cl_status){ case CL_INVALID_COMMAND_QUEUE: oclHandles.error_str += "CL_INVALID_COMMAND_QUEUE"; break; case CL_INVALID_CONTEXT: oclHandles.error_str += "CL_INVALID_CONTEXT"; break; case CL_INVALID_MEM_OBJECT: oclHandles.error_str += "CL_INVALID_MEM_OBJECT"; break; case CL_INVALID_VALUE: oclHandles.error_str += "CL_INVALID_VALUE"; break; case 
CL_INVALID_EVENT_WAIT_LIST: oclHandles.error_str += "CL_INVALID_EVENT_WAIT_LIST"; break; case CL_MEM_OBJECT_ALLOCATION_FAILURE: oclHandles.error_str += "CL_MEM_OBJECT_ALLOCATION_FAILURE"; break; case CL_OUT_OF_HOST_MEMORY: oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY"; break; default: oclHandles.error_str += "Unknown reason"; break; } if(oclHandles.cl_status != CL_SUCCESS) throw(oclHandles.error_str); #endif } //-------------------------------------------------------- //set kernel arguments void _clSetArgs(int kernel_id, int arg_idx, void * d_mem, int size = 0) throw(string){ if(!size){ oclHandles.cl_status = clSetKernelArg(oclHandles.kernel[kernel_id], arg_idx, sizeof(d_mem), &d_mem); #ifdef ERRMSG oclHandles.error_str = "excpetion in _clSetKernelArg() "; switch(oclHandles.cl_status){ case CL_INVALID_KERNEL: oclHandles.error_str += "CL_INVALID_KERNEL"; break; case CL_INVALID_ARG_INDEX: oclHandles.error_str += "CL_INVALID_ARG_INDEX"; break; case CL_INVALID_ARG_VALUE: oclHandles.error_str += "CL_INVALID_ARG_VALUE"; break; case CL_INVALID_MEM_OBJECT: oclHandles.error_str += "CL_INVALID_MEM_OBJECT"; break; case CL_INVALID_SAMPLER: oclHandles.error_str += "CL_INVALID_SAMPLER"; break; case CL_INVALID_ARG_SIZE: oclHandles.error_str += "CL_INVALID_ARG_SIZE"; break; case CL_OUT_OF_RESOURCES: oclHandles.error_str += "CL_OUT_OF_RESOURCES"; break; case CL_OUT_OF_HOST_MEMORY: oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY"; break; default: oclHandles.error_str += "Unknown reason"; break; } if(oclHandles.cl_status != CL_SUCCESS) throw(oclHandles.error_str); #endif } else{ oclHandles.cl_status = clSetKernelArg(oclHandles.kernel[kernel_id], arg_idx, size, d_mem); #ifdef ERRMSG oclHandles.error_str = "excpetion in _clSetKernelArg() "; switch(oclHandles.cl_status){ case CL_INVALID_KERNEL: oclHandles.error_str += "CL_INVALID_KERNEL"; break; case CL_INVALID_ARG_INDEX: oclHandles.error_str += "CL_INVALID_ARG_INDEX"; break; case CL_INVALID_ARG_VALUE: oclHandles.error_str += 
"CL_INVALID_ARG_VALUE"; break; case CL_INVALID_MEM_OBJECT: oclHandles.error_str += "CL_INVALID_MEM_OBJECT"; break; case CL_INVALID_SAMPLER: oclHandles.error_str += "CL_INVALID_SAMPLER"; break; case CL_INVALID_ARG_SIZE: oclHandles.error_str += "CL_INVALID_ARG_SIZE"; break; case CL_OUT_OF_RESOURCES: oclHandles.error_str += "CL_OUT_OF_RESOURCES"; break; case CL_OUT_OF_HOST_MEMORY: oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY"; break; default: oclHandles.error_str += "Unknown reason"; break; } if(oclHandles.cl_status != CL_SUCCESS) throw(oclHandles.error_str); #endif } } void _clFinish() throw(string){ oclHandles.cl_status = clFinish(oclHandles.queue); #ifdef ERRMSG oclHandles.error_str = "excpetion in _clFinish"; switch(oclHandles.cl_status){ case CL_INVALID_COMMAND_QUEUE: oclHandles.error_str += "CL_INVALID_COMMAND_QUEUE"; break; case CL_OUT_OF_RESOURCES: oclHandles.error_str += "CL_OUT_OF_RESOURCES"; break; case CL_OUT_OF_HOST_MEMORY: oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY"; break; default: oclHandles.error_str += "Unknown reasons"; break; } if(oclHandles.cl_status!=CL_SUCCESS){ throw(oclHandles.error_str); } #endif } //-------------------------------------------------------- //--cambine:enqueue kernel void _clInvokeKernel(int kernel_id, int work_items, int work_group_size) throw(string){ cl_uint work_dim = WORK_DIM; cl_event e[1]; if(work_items%work_group_size != 0) //process situations that work_items cannot be divided by work_group_size work_items = work_items + (work_group_size-(work_items%work_group_size)); size_t local_work_size[] = {work_group_size, 1}; size_t global_work_size[] = {work_items, 1}; oclHandles.cl_status = clEnqueueNDRangeKernel(oclHandles.queue, oclHandles.kernel[kernel_id], work_dim, 0, \ global_work_size, local_work_size, 0 , 0, &(e[0]) ); #ifdef ERRMSG oclHandles.error_str = "excpetion in _clInvokeKernel() -> "; switch(oclHandles.cl_status) { case CL_INVALID_PROGRAM_EXECUTABLE: oclHandles.error_str += 
"CL_INVALID_PROGRAM_EXECUTABLE"; break; case CL_INVALID_COMMAND_QUEUE: oclHandles.error_str += "CL_INVALID_COMMAND_QUEUE"; break; case CL_INVALID_KERNEL: oclHandles.error_str += "CL_INVALID_KERNEL"; break; case CL_INVALID_CONTEXT: oclHandles.error_str += "CL_INVALID_CONTEXT"; break; case CL_INVALID_KERNEL_ARGS: oclHandles.error_str += "CL_INVALID_KERNEL_ARGS"; break; case CL_INVALID_WORK_DIMENSION: oclHandles.error_str += "CL_INVALID_WORK_DIMENSION"; break; case CL_INVALID_GLOBAL_WORK_SIZE: oclHandles.error_str += "CL_INVALID_GLOBAL_WORK_SIZE"; break; case CL_INVALID_WORK_GROUP_SIZE: oclHandles.error_str += "CL_INVALID_WORK_GROUP_SIZE"; break; case CL_INVALID_WORK_ITEM_SIZE: oclHandles.error_str += "CL_INVALID_WORK_ITEM_SIZE"; break; case CL_INVALID_GLOBAL_OFFSET: oclHandles.error_str += "CL_INVALID_GLOBAL_OFFSET"; break; case CL_OUT_OF_RESOURCES: oclHandles.error_str += "CL_OUT_OF_RESOURCES"; break; case CL_MEM_OBJECT_ALLOCATION_FAILURE: oclHandles.error_str += "CL_MEM_OBJECT_ALLOCATION_FAILURE"; break; case CL_INVALID_EVENT_WAIT_LIST: oclHandles.error_str += "CL_INVALID_EVENT_WAIT_LIST"; break; case CL_OUT_OF_HOST_MEMORY: oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY"; break; default: oclHandles.error_str += "Unkown reseason"; break; } if(oclHandles.cl_status != CL_SUCCESS) throw(oclHandles.error_str); #endif _clFinish(); oclHandles.cl_status = clWaitForEvents(1, &e[0]); #ifdef ERRMSG if (oclHandles.cl_status!= CL_SUCCESS) throw(string("excpetion in _clEnqueueNDRange() -> clWaitForEvents")); #endif } void _clInvokeKernel2D(int kernel_id, int range_x, int range_y, int group_x, int group_y) throw(string){ cl_uint work_dim = WORK_DIM; size_t local_work_size[] = {group_x, group_y}; size_t global_work_size[] = {range_x, range_y}; cl_event e[1]; /*if(work_items%work_group_size != 0) //process situations that work_items cannot be divided by work_group_size work_items = work_items + (work_group_size-(work_items%work_group_size));*/ oclHandles.cl_status = 
clEnqueueNDRangeKernel(oclHandles.queue, oclHandles.kernel[kernel_id], work_dim, 0, \ global_work_size, local_work_size, 0 , 0, &(e[0]) ); #ifdef ERRMSG oclHandles.error_str = "excpetion in _clInvokeKernel() -> "; switch(oclHandles.cl_status) { case CL_INVALID_PROGRAM_EXECUTABLE: oclHandles.error_str += "CL_INVALID_PROGRAM_EXECUTABLE"; break; case CL_INVALID_COMMAND_QUEUE: oclHandles.error_str += "CL_INVALID_COMMAND_QUEUE"; break; case CL_INVALID_KERNEL: oclHandles.error_str += "CL_INVALID_KERNEL"; break; case CL_INVALID_CONTEXT: oclHandles.error_str += "CL_INVALID_CONTEXT"; break; case CL_INVALID_KERNEL_ARGS: oclHandles.error_str += "CL_INVALID_KERNEL_ARGS"; break; case CL_INVALID_WORK_DIMENSION: oclHandles.error_str += "CL_INVALID_WORK_DIMENSION"; break; case CL_INVALID_GLOBAL_WORK_SIZE: oclHandles.error_str += "CL_INVALID_GLOBAL_WORK_SIZE"; break; case CL_INVALID_WORK_GROUP_SIZE: oclHandles.error_str += "CL_INVALID_WORK_GROUP_SIZE"; break; case CL_INVALID_WORK_ITEM_SIZE: oclHandles.error_str += "CL_INVALID_WORK_ITEM_SIZE"; break; case CL_INVALID_GLOBAL_OFFSET: oclHandles.error_str += "CL_INVALID_GLOBAL_OFFSET"; break; case CL_OUT_OF_RESOURCES: oclHandles.error_str += "CL_OUT_OF_RESOURCES"; break; case CL_MEM_OBJECT_ALLOCATION_FAILURE: oclHandles.error_str += "CL_MEM_OBJECT_ALLOCATION_FAILURE"; break; case CL_INVALID_EVENT_WAIT_LIST: oclHandles.error_str += "CL_INVALID_EVENT_WAIT_LIST"; break; case CL_OUT_OF_HOST_MEMORY: oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY"; break; default: oclHandles.error_str += "Unkown reseason"; break; } if(oclHandles.cl_status != CL_SUCCESS) throw(oclHandles.error_str); #endif _clFinish(); oclHandles.cl_status = clWaitForEvents(1, &e[0]); #ifdef ERRMSG if (oclHandles.cl_status!= CL_SUCCESS) throw(string("excpetion in _clEnqueueNDRange() -> clWaitForEvents")); #endif } //-------------------------------------------------------- //release OpenCL objects void _clFree(cl_mem ob) throw(string){ if(ob!=NULL) oclHandles.cl_status = 
clReleaseMemObject(ob); #ifdef ERRMSG oclHandles.error_str = "excpetion in _clFree() ->"; switch(oclHandles.cl_status) { case CL_INVALID_MEM_OBJECT: oclHandles.error_str += "CL_INVALID_MEM_OBJECT"; break; case CL_OUT_OF_RESOURCES: oclHandles.error_str += "CL_OUT_OF_RESOURCES"; break; case CL_OUT_OF_HOST_MEMORY: oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY"; break; default: oclHandles.error_str += "Unkown reseason"; break; } if (oclHandles.cl_status!= CL_SUCCESS) throw(oclHandles.error_str); #endif } #endif //_CL_HELPER_
ICP.h
/////////////////////////////////////////////////////////////////////////////// /// "Sparse Iterative Closest Point" /// by Sofien Bouaziz, Andrea Tagliasacchi, Mark Pauly /// Copyright (C) 2013 LGG, EPFL /////////////////////////////////////////////////////////////////////////////// /// 1) This file contains different implementations of the ICP algorithm. /// 2) This code requires EIGEN and NANOFLANN. /// 3) If OPENMP is activated some part of the code will be parallelized. /// 4) This code is for now designed for 3D registration /// 5) Two main input types are Eigen::Matrix3Xd or Eigen::Map<Eigen::Matrix3Xd> /////////////////////////////////////////////////////////////////////////////// /// namespace nanoflann: NANOFLANN KD-tree adaptor for EIGEN /// namespace RigidMotionEstimator: functions to compute the rigid motion /// namespace SICP: sparse ICP implementation /// namespace ICP: reweighted ICP implementation /////////////////////////////////////////////////////////////////////////////// #ifndef ICP_H #define ICP_H #include "nanoflann.hpp" #include <Eigen/Dense> /////////////////////////////////////////////////////////////////////////////// namespace nanoflann { /// KD-tree adaptor for working with data directly stored in an Eigen Matrix, without duplicating the data storage. 
/// This code is adapted from the KDTreeEigenMatrixAdaptor class of nanoflann.hpp template <class MatrixType, int DIM = -1, class Distance = nanoflann::metric_L2, typename IndexType = int> struct KDTreeAdaptor { typedef KDTreeAdaptor<MatrixType,DIM,Distance> self_t; typedef typename MatrixType::Scalar num_t; typedef typename Distance::template traits<num_t,self_t>::distance_t metric_t; typedef KDTreeSingleIndexAdaptor< metric_t,self_t,DIM,IndexType> index_t; index_t* index; KDTreeAdaptor(const MatrixType &mat, const int leaf_max_size = 10) : m_data_matrix(mat) { const size_t dims = mat.rows(); index = new index_t( dims, *this, nanoflann::KDTreeSingleIndexAdaptorParams(leaf_max_size ) ); index->buildIndex(); } ~KDTreeAdaptor() {delete index;} const MatrixType &m_data_matrix; /// Query for the num_closest closest points to a given point (entered as query_point[0:dim-1]). inline void query(const num_t *query_point, const size_t num_closest, IndexType *out_indices, num_t *out_distances_sq) const { nanoflann::KNNResultSet<typename MatrixType::Scalar,IndexType> resultSet(num_closest); resultSet.init(out_indices, out_distances_sq); index->findNeighbors(resultSet, query_point, nanoflann::SearchParams()); } /// Query for the closest points to a given point (entered as query_point[0:dim-1]). 
inline IndexType closest(const num_t *query_point) const { IndexType out_indices; num_t out_distances_sq; query(query_point, 1, &out_indices, &out_distances_sq); return out_indices; } const self_t & derived() const {return *this;} self_t & derived() {return *this;} inline size_t kdtree_get_point_count() const {return m_data_matrix.cols();} /// Returns the distance between the vector "p1[0:size-1]" and the data point with index "idx_p2" stored in the class: inline num_t kdtree_distance(const num_t *p1, const size_t idx_p2,size_t size) const { num_t s=0; for (size_t i=0; i<size; i++) { const num_t d= p1[i]-m_data_matrix.coeff(i,idx_p2); s+=d*d; } return s; } /// Returns the dim'th component of the idx'th point in the class: inline num_t kdtree_get_pt(const size_t idx, int dim) const { return m_data_matrix.coeff(dim,idx); } /// Optional bounding-box computation: return false to default to a standard bbox computation loop. template <class BBOX> bool kdtree_get_bbox(BBOX&) const {return false;} }; } /////////////////////////////////////////////////////////////////////////////// /// Compute the rigid motion for point-to-point and point-to-plane distances namespace RigidMotionEstimator { /// @param Source (one 3D point per column) /// @param Target (one 3D point per column) /// @param Confidence weights template <typename Derived1, typename Derived2, typename Derived3> Eigen::Affine3d point_to_point(Eigen::MatrixBase<Derived1>& X, Eigen::MatrixBase<Derived2>& Y, const Eigen::MatrixBase<Derived3>& w) { /// Normalize weight vector Eigen::VectorXd w_normalized = w/w.sum(); /// De-mean Eigen::Vector3d X_mean, Y_mean; for(int i=0; i<3; ++i) { X_mean(i) = (X.row(i).array()*w_normalized.transpose().array()).sum(); Y_mean(i) = (Y.row(i).array()*w_normalized.transpose().array()).sum(); } X.colwise() -= X_mean; Y.colwise() -= Y_mean; /// Compute transformation Eigen::Affine3d transformation; Eigen::Matrix3d sigma = X * w_normalized.asDiagonal() * Y.transpose(); 
Eigen::JacobiSVD<Eigen::Matrix3d> svd(sigma, Eigen::ComputeFullU | Eigen::ComputeFullV); if(svd.matrixU().determinant()*svd.matrixV().determinant() < 0.0) { Eigen::Vector3d S = Eigen::Vector3d::Ones(); S(2) = -1.0; transformation.linear().noalias() = svd.matrixV()*S.asDiagonal()*svd.matrixU().transpose(); } else { transformation.linear().noalias() = svd.matrixV()*svd.matrixU().transpose(); } transformation.translation().noalias() = Y_mean - transformation.linear()*X_mean; /// Apply transformation X = transformation*X; /// Re-apply mean X.colwise() += X_mean; Y.colwise() += Y_mean; /// Return transformation return transformation; } /// @param Source (one 3D point per column) /// @param Target (one 3D point per column) template <typename Derived1, typename Derived2> inline Eigen::Affine3d point_to_point(Eigen::MatrixBase<Derived1>& X, Eigen::MatrixBase<Derived2>& Y) { return point_to_point(X, Y, Eigen::VectorXd::Ones(X.cols())); } /// @param Source (one 3D point per column) /// @param Target (one 3D point per column) /// @param Target normals (one 3D normal per column) /// @param Confidence weights /// @param Right hand side template <typename Derived1, typename Derived2, typename Derived3, typename Derived4, typename Derived5> Eigen::Affine3d point_to_plane(Eigen::MatrixBase<Derived1>& X, Eigen::MatrixBase<Derived2>& Y, Eigen::MatrixBase<Derived3>& N, const Eigen::MatrixBase<Derived4>& w, const Eigen::MatrixBase<Derived5>& u) { typedef Eigen::Matrix<double, 6, 6> Matrix66; typedef Eigen::Matrix<double, 6, 1> Vector6; typedef Eigen::Block<Matrix66, 3, 3> Block33; /// Normalize weight vector Eigen::VectorXd w_normalized = w/w.sum(); /// De-mean Eigen::Vector3d X_mean; for(int i=0; i<3; ++i) X_mean(i) = (X.row(i).array()*w_normalized.transpose().array()).sum(); X.colwise() -= X_mean; Y.colwise() -= X_mean; /// Prepare LHS and RHS Matrix66 LHS = Matrix66::Zero(); Vector6 RHS = Vector6::Zero(); Block33 TL = LHS.topLeftCorner<3,3>(); Block33 TR = 
LHS.topRightCorner<3,3>(); Block33 BR = LHS.bottomRightCorner<3,3>(); Eigen::MatrixXd C = Eigen::MatrixXd::Zero(3,X.cols()); #pragma omp parallel { #pragma omp for for(int i=0; i<X.cols(); i++) { C.col(i) = X.col(i).cross(N.col(i)); } #pragma omp sections nowait { #pragma omp section for(int i=0; i<X.cols(); i++) TL.selfadjointView<Eigen::Upper>().rankUpdate(C.col(i), w(i)); #pragma omp section for(int i=0; i<X.cols(); i++) TR += (C.col(i)*N.col(i).transpose())*w(i); #pragma omp section for(int i=0; i<X.cols(); i++) BR.selfadjointView<Eigen::Upper>().rankUpdate(N.col(i), w(i)); #pragma omp section for(int i=0; i<C.cols(); i++) { double dist_to_plane = -((X.col(i) - Y.col(i)).dot(N.col(i)) - u(i))*w(i); RHS.head<3>() += C.col(i)*dist_to_plane; RHS.tail<3>() += N.col(i)*dist_to_plane; } } } LHS = LHS.selfadjointView<Eigen::Upper>(); /// Compute transformation Eigen::Affine3d transformation; Eigen::LDLT<Matrix66> ldlt(LHS); RHS = ldlt.solve(RHS); transformation = Eigen::AngleAxisd(RHS(0), Eigen::Vector3d::UnitX()) * Eigen::AngleAxisd(RHS(1), Eigen::Vector3d::UnitY()) * Eigen::AngleAxisd(RHS(2), Eigen::Vector3d::UnitZ()); transformation.translation() = RHS.tail<3>(); /// Apply transformation X = transformation*X; /// Re-apply mean X.colwise() += X_mean; Y.colwise() += X_mean; /// Return transformation return transformation; } /// @param Source (one 3D point per column) /// @param Target (one 3D point per column) /// @param Target normals (one 3D normal per column) /// @param Confidence weights template <typename Derived1, typename Derived2, typename Derived3, typename Derived4> inline Eigen::Affine3d point_to_plane(Eigen::MatrixBase<Derived1>& X, Eigen::MatrixBase<Derived2>& Yp, Eigen::MatrixBase<Derived3>& Yn, const Eigen::MatrixBase<Derived4>& w) { return point_to_plane(X, Yp, Yn, w, Eigen::VectorXd::Zero(X.cols())); } } /////////////////////////////////////////////////////////////////////////////// /// ICP implementation using ADMM/ALM/Penalty method namespace SICP 
{ struct Parameters { bool use_penalty = false; /// if use_penalty then penalty method else ADMM or ALM (see max_inner) double p = 1.0; /// p norm double mu = 10.0; /// penalty weight double alpha = 1.2; /// penalty increase factor double max_mu = 1e5; /// max penalty int max_icp = 100; /// max ICP iteration int max_outer = 100; /// max outer iteration int max_inner = 1; /// max inner iteration. If max_inner=1 then ADMM else ALM double stop = 1e-5; /// stopping criteria bool print_icpn = false; /// (debug) print ICP iteration }; /// Shrinkage operator (Automatic loop unrolling using template) template<unsigned int I> inline double shrinkage(double mu, double n, double p, double s) { return shrinkage<I-1>(mu, n, p, 1.0 - (p/mu)*std::pow(n, p-2.0)*std::pow(s, p-1.0)); } template<> inline double shrinkage<0>(double, double, double, double s) {return s;} /// 3D Shrinkage for point-to-point template<unsigned int I> inline void shrink(Eigen::Matrix3Xd& Q, double mu, double p) { double Ba = std::pow((2.0/mu)*(1.0-p), 1.0/(2.0-p)); double ha = Ba + (p/mu)*std::pow(Ba, p-1.0); #pragma omp parallel for for(int i=0; i<Q.cols(); ++i) { double n = Q.col(i).norm(); double w = 0.0; if(n > ha) w = shrinkage<I>(mu, n, p, (Ba/n + 1.0)/2.0); Q.col(i) *= w; } } /// 1D Shrinkage for point-to-plane template<unsigned int I> inline void shrink(Eigen::VectorXd& y, double mu, double p) { double Ba = std::pow((2.0/mu)*(1.0-p), 1.0/(2.0-p)); double ha = Ba + (p/mu)*std::pow(Ba, p-1.0); #pragma omp parallel for for(int i=0; i<y.rows(); ++i) { double n = std::abs(y(i)); double s = 0.0; if(n > ha) s = shrinkage<I>(mu, n, p, (Ba/n + 1.0)/2.0); y(i) *= s; } } /// Sparse ICP with point to point /// @param Source (one 3D point per column) /// @param Target (one 3D point per column) /// @param Parameters template <typename Derived1, typename Derived2> void point_to_point(Eigen::MatrixBase<Derived1>& X, Eigen::MatrixBase<Derived2>& Y, Parameters par = Parameters()) { /// Build kd-tree 
nanoflann::KDTreeAdaptor<Eigen::MatrixBase<Derived2>, 3, nanoflann::metric_L2_Simple> kdtree(Y); /// Buffers Eigen::Matrix3Xd Q = Eigen::Matrix3Xd::Zero(3, X.cols()); Eigen::Matrix3Xd Z = Eigen::Matrix3Xd::Zero(3, X.cols()); Eigen::Matrix3Xd C = Eigen::Matrix3Xd::Zero(3, X.cols()); Eigen::Matrix3Xd Xo1 = X; Eigen::Matrix3Xd Xo2 = X; /// ICP for(int icp=0; icp<par.max_icp; ++icp) { if(par.print_icpn) std::cout << "Iteration #" << icp << "/" << par.max_icp << std::endl; /// Find closest point #pragma omp parallel for for(int i=0; i<X.cols(); ++i) { Q.col(i) = Y.col(kdtree.closest(X.col(i).data())); } /// Computer rotation and translation double mu = par.mu; for(int outer=0; outer<par.max_outer; ++outer) { double dual = 0.0; for(int inner=0; inner<par.max_inner; ++inner) { /// Z update (shrinkage) Z = X-Q+C/mu; shrink<3>(Z, mu, par.p); /// Rotation and translation update Eigen::Matrix3Xd U = Q+Z-C/mu; RigidMotionEstimator::point_to_point(X, U); /// Stopping criteria dual = (X-Xo1).colwise().norm().maxCoeff(); Xo1 = X; if(dual < par.stop) break; } /// C update (lagrange multipliers) Eigen::Matrix3Xd P = X-Q-Z; if(!par.use_penalty) C.noalias() += mu*P; /// mu update (penalty) if(mu < par.max_mu) mu *= par.alpha; /// Stopping criteria double primal = P.colwise().norm().maxCoeff(); if(primal < par.stop && dual < par.stop) break; } /// Stopping criteria double stop = (X-Xo2).colwise().norm().maxCoeff(); Xo2 = X; if(stop < par.stop) break; } } /// Sparse ICP with point to plane /// @param Source (one 3D point per column) /// @param Target (one 3D point per column) /// @param Target normals (one 3D normal per column) /// @param Parameters template <typename Derived1, typename Derived2, typename Derived3> void point_to_plane(Eigen::MatrixBase<Derived1>& X, Eigen::MatrixBase<Derived2>& Y, Eigen::MatrixBase<Derived3>& N, Parameters par = Parameters()) { /// Build kd-tree nanoflann::KDTreeAdaptor<Eigen::MatrixBase<Derived2>, 3, nanoflann::metric_L2_Simple> kdtree(Y); /// Buffers 
Eigen::Matrix3Xd Qp = Eigen::Matrix3Xd::Zero(3, X.cols()); Eigen::Matrix3Xd Qn = Eigen::Matrix3Xd::Zero(3, X.cols()); Eigen::VectorXd Z = Eigen::VectorXd::Zero(X.cols()); Eigen::VectorXd C = Eigen::VectorXd::Zero(X.cols()); Eigen::Matrix3Xd Xo1 = X; Eigen::Matrix3Xd Xo2 = X; /// ICP for(int icp=0; icp<par.max_icp; ++icp) { if(par.print_icpn) std::cout << "Iteration #" << icp << "/" << par.max_icp << std::endl; /// Find closest point #pragma omp parallel for for(int i=0; i<X.cols(); ++i) { int id = kdtree.closest(X.col(i).data()); Qp.col(i) = Y.col(id); Qn.col(i) = N.col(id); } /// Computer rotation and translation double mu = par.mu; for(int outer=0; outer<par.max_outer; ++outer) { double dual = 0.0; for(int inner=0; inner<par.max_inner; ++inner) { /// Z update (shrinkage) Z = (Qn.array()*(X-Qp).array()).colwise().sum().transpose()+C.array()/mu; shrink<3>(Z, mu, par.p); /// Rotation and translation update Eigen::VectorXd U = Z-C/mu; RigidMotionEstimator::point_to_plane(X, Qp, Qn, Eigen::VectorXd::Ones(X.cols()), U); /// Stopping criteria dual = (X-Xo1).colwise().norm().maxCoeff(); Xo1 = X; if(dual < par.stop) break; } /// C update (lagrange multipliers) Eigen::VectorXf P = (Qn.array()*(X-Qp).array()).colwise().sum().transpose()-Z.array(); if(!par.use_penalty) C.noalias() += mu*P; /// mu update (penalty) if(mu < par.max_mu) mu *= par.alpha; /// Stopping criteria double primal = P.array().abs().maxCoeff(); if(primal < par.stop && dual < par.stop) break; } /// Stopping criteria double stop = (X-Xo2).colwise().norm().maxCoeff(); Xo2 = X; if(stop < par.stop) break; } } } /////////////////////////////////////////////////////////////////////////////// /// ICP implementation using iterative reweighting namespace ICP { enum Function { PNORM, TUKEY, FAIR, LOGISTIC, TRIMMED, NONE }; class Parameters { public: Parameters() : f(NONE), p(0.1), max_icp(100), max_outer(100), stop(1e-5) {} /// Parameters Function f; /// robust function type double p; /// paramter of the robust 
function int max_icp; /// max ICP iteration int max_outer; /// max outer iteration double stop; /// stopping criteria }; /// Weight functions /// @param Residuals /// @param Parameter void uniform_weight(Eigen::VectorXd& r) { r = Eigen::VectorXd::Ones(r.rows()); } /// @param Residuals /// @param Parameter void pnorm_weight(Eigen::VectorXd& r, double p, double reg=1e-8) { for(int i=0; i<r.rows(); ++i) { r(i) = p/(std::pow(r(i),2-p) + reg); } } /// @param Residuals /// @param Parameter void tukey_weight(Eigen::VectorXd& r, double p) { for(int i=0; i<r.rows(); ++i) { if(r(i) > p) r(i) = 0.0; else r(i) = std::pow((1.0 - std::pow(r(i)/p,2.0)), 2.0); } } /// @param Residuals /// @param Parameter void fair_weight(Eigen::VectorXd& r, double p) { for(int i=0; i<r.rows(); ++i) { r(i) = 1.0/(1.0 + r(i)/p); } } /// @param Residuals /// @param Parameter void logistic_weight(Eigen::VectorXd& r, double p) { for(int i=0; i<r.rows(); ++i) { r(i) = (p/r(i))*std::tanh(r(i)/p); } } struct sort_pred { bool operator()(const std::pair<int,double> &left, const std::pair<int,double> &right) { return left.second < right.second; } }; /// @param Residuals /// @param Parameter void trimmed_weight(Eigen::VectorXd& r, double p) { std::vector<std::pair<int, double> > sortedDist(r.rows()); for(int i=0; i<r.rows(); ++i) { sortedDist[i] = std::pair<int, double>(i,r(i)); } std::sort(sortedDist.begin(), sortedDist.end(), sort_pred()); r.setZero(); int nbV = r.rows()*p; for(int i=0; i<nbV; ++i) { r(sortedDist[i].first) = 1.0; } } /// @param Function type /// @param Residuals /// @param Parameter void robust_weight(Function f, Eigen::VectorXd& r, double p) { switch(f) { case PNORM: pnorm_weight(r,p); break; case TUKEY: tukey_weight(r,p); break; case FAIR: fair_weight(r,p); break; case LOGISTIC: logistic_weight(r,p); break; case TRIMMED: trimmed_weight(r,p); break; case NONE: uniform_weight(r); break; default: uniform_weight(r); break; } } /// Reweighted ICP with point to point /// @param Source (one 3D 
point per column) /// @param Target (one 3D point per column) /// @param Parameters void point_to_point(Eigen::Matrix3Xd& X, Eigen::Matrix3Xd& Y, Parameters par = Parameters()) { /// Build kd-tree nanoflann::KDTreeAdaptor<Eigen::Matrix3Xd, 3, nanoflann::metric_L2_Simple> kdtree(Y); /// Buffers Eigen::Matrix3Xd Q = Eigen::Matrix3Xd::Zero(3, X.cols()); Eigen::VectorXd W = Eigen::VectorXd::Zero(X.cols()); Eigen::Matrix3Xd Xo1 = X; Eigen::Matrix3Xd Xo2 = X; /// ICP for(int icp=0; icp<par.max_icp; ++icp) { /// Find closest point #pragma omp parallel for for(int i=0; i<X.cols(); ++i) { Q.col(i) = Y.col(kdtree.closest(X.col(i).data())); } /// Computer rotation and translation for(int outer=0; outer<par.max_outer; ++outer) { /// Compute weights W = (X-Q).colwise().norm(); robust_weight(par.f, W, par.p); /// Rotation and translation update RigidMotionEstimator::point_to_point(X, Q, W); /// Stopping criteria double stop1 = (X-Xo1).colwise().norm().maxCoeff(); Xo1 = X; if(stop1 < par.stop) break; } /// Stopping criteria double stop2 = (X-Xo2).colwise().norm().maxCoeff(); Xo2 = X; if(stop2 < par.stop) break; } } /// Reweighted ICP with point to plane /// @param Source (one 3D point per column) /// @param Target (one 3D point per column) /// @param Target normals (one 3D normal per column) /// @param Parameters template <typename Derived1, typename Derived2, typename Derived3> void point_to_plane(Eigen::MatrixBase<Derived1>& X, Eigen::MatrixBase<Derived2>& Y, Eigen::MatrixBase<Derived3>& N, Parameters par = Parameters()) { /// Build kd-tree nanoflann::KDTreeAdaptor<Eigen::MatrixBase<Derived2>, 3, nanoflann::metric_L2_Simple> kdtree(Y); /// Buffers Eigen::Matrix3Xd Qp = Eigen::Matrix3Xd::Zero(3, X.cols()); Eigen::Matrix3Xd Qn = Eigen::Matrix3Xd::Zero(3, X.cols()); Eigen::VectorXd W = Eigen::VectorXd::Zero(X.cols()); Eigen::Matrix3Xd Xo1 = X; Eigen::Matrix3Xd Xo2 = X; /// ICP for(int icp=0; icp<par.max_icp; ++icp) { /// Find closest point #pragma omp parallel for for(int i=0; 
i<X.cols(); ++i) { int id = kdtree.closest(X.col(i).data()); Qp.col(i) = Y.col(id); Qn.col(i) = N.col(id); } /// Computer rotation and translation for(int outer=0; outer<par.max_outer; ++outer) { /// Compute weights W = (Qn.array()*(X-Qp).array()).colwise().sum().abs().transpose(); robust_weight(par.f, W, par.p); /// Rotation and translation update RigidMotionEstimator::point_to_plane(X, Qp, Qn, W); /// Stopping criteria double stop1 = (X-Xo1).colwise().norm().maxCoeff(); Xo1 = X; if(stop1 < par.stop) break; } /// Stopping criteria double stop2 = (X-Xo2).colwise().norm().maxCoeff() ; Xo2 = X; if(stop2 < par.stop) break; } } } /////////////////////////////////////////////////////////////////////////////// #endif
for-5.c
/* { dg-do compile } */ /* { dg-options "-fopenmp -fdump-tree-ompexp" } */ extern void bar(int); void foo (int n) { int i; #pragma omp for schedule(guided) for (i = 0; i < n; ++i) bar(i); } /* { dg-final { scan-tree-dump-times "GOMP_loop_guided_start" 1 "ompexp" } } */ /* { dg-final { scan-tree-dump-times "GOMP_loop_guided_next" 1 "ompexp" } } */
PReLU.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/PReLU.c"
#else

/* Forward pass of PReLU: output = input if input > 0, else weight * input.
 * With a single weight the parameter is shared across all elements;
 * otherwise one weight per plane (dimension 0 for 1-D input, dimension 1
 * for batched input) is applied.
 *
 * FIX: the THError calls below previously used "%d" with int64_t
 * arguments, which is undefined behavior in a varargs call; arguments are
 * now cast to long and printed with "%ld". */
void THNN_(PReLU_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          THTensor *weight)
{
  THTensor_(resizeAs)(output, input);
  int64_t nOutputPlane = THTensor_(numel)(weight);

  if (nOutputPlane == 1)
  {
    // handle shared parameter case
    real w = *THTensor_(data)(weight);
    TH_TENSOR_APPLY2(real, output, real, input,
      const real r = (*input_data > 0) ? 1 : w;
      *output_data = *input_data * r;
    );
    return;
  }

  input = THTensor_(newContiguous)(input);
  int64_t bs = 1, ks = 1;
  {
    int64_t input_ndim = THTensor_(nDimensionLegacyAll)(input);
    /* Plane dimension is 0 for 1-D input, 1 for batched input. */
    if (THTensor_sizeLegacyNoScalars(input, input_ndim > 1) != nOutputPlane)
      THError("Wrong number of input planes. Expected %ld but got %ld.",
              (long)nOutputPlane,
              (long)THTensor_sizeLegacyNoScalars(input, input_ndim > 1));

    if (input_ndim > 1) {
      bs = input->size(0);
      for (int d = 2; d < input_ndim; d++) {
        ks *= input->size(d);
      }
    }
  }

  real *output_data = THTensor_(data)(output);
  real *input_data = THTensor_(data)(input);
  real *weight_data = THTensor_(data)(weight);
  THIndex_t i, j, k;
#pragma omp parallel for private(j,k)
  for (i = 0; i < bs; ++i)
  {
    real* n_input_data = input_data + i*nOutputPlane*ks;
    real* n_output_data = output_data + i*nOutputPlane*ks;
    for (j = 0; j < nOutputPlane; ++j)
    {
      for (k = 0; k < ks; ++k)
        n_output_data[k] = (n_input_data[k] > 0)
          ? n_input_data[k]
          : weight_data[j] * n_input_data[k];
      n_input_data += ks;
      n_output_data += ks;
    }
  }
  THTensor_(free)(input);
}

/* Backward pass w.r.t. the input:
 * gradInput = gradOutput where input > 0, else weight * gradOutput.
 * Same format-specifier fix as in updateOutput. */
void THNN_(PReLU_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          THTensor *weight)
{
  THNN_CHECK_NELEMENT(input, gradOutput);
  THTensor_(resizeAs)(gradInput, input);
  int64_t nOutputPlane = THTensor_(numel)(weight);

  if (nOutputPlane == 1)
  {
    real w = THTensor_(data)(weight)[0];
    TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input,
      if ((*input_data) > 0)
        *gradInput_data = *gradOutput_data;
      else
        *gradInput_data = w * (*gradOutput_data);
    );
    return;
  }

  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);
  weight = THTensor_(newContiguous)(weight);

  const real *input_data = THTensor_(data)(input);
  const real *gradOutput_data = THTensor_(data)(gradOutput);
  const real *weight_data = THTensor_(data)(weight);
  real *gradInput_data = THTensor_(data)(gradInput);

  int64_t bs = 1, ks = 1;
  {
    int64_t input_ndim = THTensor_(nDimensionLegacyAll)(input);
    if (THTensor_sizeLegacyNoScalars(input, input_ndim > 1) != nOutputPlane)
      THError("Wrong number of input planes. Expected %ld but got %ld.",
              (long)nOutputPlane,
              (long)THTensor_sizeLegacyNoScalars(input, input_ndim > 1));

    if (input_ndim > 1) {
      bs = input->size(0);
      for (int d = 2; d < input_ndim; d++) {
        ks *= input->size(d);
      }
    }
  }

  THIndex_t i, j, k;
#pragma omp parallel for private(j,k)
  for (i = 0; i < bs; ++i)
  {
    const real *n_input_data = input_data + i*nOutputPlane*ks;
    const real *n_gradOutput_data = gradOutput_data + i*nOutputPlane*ks;
    real *n_gradInput_data = gradInput_data + i*nOutputPlane*ks;

    for (j = 0; j < nOutputPlane; ++j)
    {
      real w = weight_data[j];
      for (k = 0; k < ks; ++k)
      {
        if (n_input_data[k] > 0)
          n_gradInput_data[k] = n_gradOutput_data[k];
        else
          n_gradInput_data[k] = n_gradOutput_data[k] * w;
      }
      n_input_data += ks;
      n_gradInput_data += ks;
      n_gradOutput_data += ks;
    }
  }
  THTensor_(free)(input);
  THTensor_(free)(gradOutput);
  THTensor_(free)(weight);
}

/* Accumulates the gradient w.r.t. the weight:
 * gradWeight[j] += scale * sum over elements of plane j with input <= 0
 * of (input * gradOutput).  The batch loop is intentionally serial: each
 * batch item accumulates into the same gradWeight entries.
 * Same format-specifier fix as above; gradInput is unused but kept for
 * THNN interface compatibility. */
void THNN_(PReLU_accGradParameters)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          THTensor *weight,
          THTensor *gradWeight,
          accreal scale_)
{
  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
  THNN_CHECK_NELEMENT(input, gradOutput);
  int64_t nOutputPlane = THTensor_(numel)(weight);

  if (nOutputPlane == 1)
  {
    real *gradWeight_data = THTensor_(data)(gradWeight);
    real sum = 0;
    TH_TENSOR_APPLY2(real, input, real, gradOutput,
      if ((*input_data) <= 0)
        sum += (*input_data) * (*gradOutput_data);
    );
    gradWeight_data[0] += scale * sum;
    return;
  }

  THArgCheck(THTensor_(isContiguous)(gradWeight), 6,
             "gradWeight needs to be contiguous");
  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);
  weight = THTensor_(newContiguous)(weight);

  int64_t bs = 1, ks = 1;
  {
    int64_t input_ndim = THTensor_(nDimensionLegacyAll)(input);
    if (THTensor_sizeLegacyNoScalars(input, input_ndim > 1) != nOutputPlane)
      THError("Wrong number of input planes. Expected %ld but got %ld.",
              (long)nOutputPlane,
              (long)THTensor_sizeLegacyNoScalars(input, input_ndim > 1));

    if (input_ndim > 1) {
      bs = input->size(0);
      for (int d = 2; d < input_ndim; d++) {
        ks *= input->size(d);
      }
    }
  }

  const real *input_data = THTensor_(data)(input);
  const real *gradOutput_data = THTensor_(data)(gradOutput);
  real *gradWeight_data = THTensor_(data)(gradWeight);

  THIndex_t i, j, k;
  for (i = 0; i < bs; ++i)
  {
    const real *n_input_data = input_data + i*nOutputPlane*ks;
    const real *n_gradOutput_data = gradOutput_data + i*nOutputPlane*ks;

    for (j = 0; j < nOutputPlane; ++j)
    {
      real sum = 0;
      for (k = 0; k < ks; ++k)
        if (n_input_data[k] <= 0)
          sum += n_gradOutput_data[k] * n_input_data[k];
      gradWeight_data[j] += scale * sum;
      n_input_data += ks;
      n_gradOutput_data += ks;
    }
  }
  THTensor_(free)(input);
  THTensor_(free)(gradOutput);
  THTensor_(free)(weight);
}

#endif
fft3d.h
/*
 * fft3d.h
 *
 * Copyright (C) 2014 Diamond Light Source
 *
 * Author: Richard Gildea
 *
 * This code is distributed under the BSD license, a copy of which is
 * included in the root directory of this package.
 */
#ifndef DIALS_ALGORITHMS_INTEGRATION_FFT3D_H
#define DIALS_ALGORITHMS_INTEGRATION_FFT3D_H
#include <stdio.h>
#include <iostream>
#include <cmath>
#include <scitbx/vec2.h>
#include <scitbx/array_family/flex_types.h>
#include <scitbx/math/utils.h>
#include <cstdlib>
#include <scitbx/array_family/versa_matrix.h>
#include <dials/array_family/scitbx_shared_and_versa.h>
#include <dials/algorithms/spot_prediction/rotation_angles.h>
#include <dxtbx/model/scan_helpers.h>

namespace dials { namespace algorithms {

  using dxtbx::model::is_angle_in_range;

  // helper function for sampling_volume_map: returns true if either of the
  // two candidate rotation angles falls inside any of the given angle ranges
  bool are_angles_in_range(af::ref<vec2<double> > const& angle_ranges,
                           vec2<double> const& angles) {
    for (std::size_t i = 0; i < 2; i++) {
      double angle = angles[i];
      for (std::size_t j = 0; j < angle_ranges.size(); j++) {
        if (is_angle_in_range(angle_ranges[j], angle)) {
          return true;
        }
      }
    }
    return false;
  }

  // compute a map of the sampling volume of a scan: for every voxel of the
  // reciprocal-space grid (centred on the grid midpoint, spaced by
  // rl_grid_spacing) inside the d_min resolution sphere, mark the voxel with
  // a Debye-Waller-style factor exp(-b_iso*|d*|^2/4) (or 1 if b_iso == 0)
  // when the voxel can be brought into diffracting condition within one of
  // the scan's angle ranges.  Voxels outside the sampled volume are left
  // untouched.
  void sampling_volume_map(af::ref<double, af::c_grid<3> > const& data,
                           af::ref<vec2<double> > const& angle_ranges,
                           vec3<double> s0,
                           vec3<double> m2,
                           double const& rl_grid_spacing,
                           double d_min,
                           double b_iso) {
    typedef af::c_grid<3>::index_type index_t;
    index_t const gridding_n_real = index_t(data.accessor());
    RotationAngles calculate_rotation_angles_(s0, m2);
    double one_over_d_sq_min = 1 / (d_min * d_min);
    for (std::size_t i = 0; i < gridding_n_real[0]; i++) {
      double i_rl = (double(i) - double(gridding_n_real[0] / 2.0)) * rl_grid_spacing;
      double i_rl_sq = i_rl * i_rl;
      for (std::size_t j = 0; j < gridding_n_real[1]; j++) {
        double j_rl = (double(j) - double(gridding_n_real[1] / 2.0)) * rl_grid_spacing;
        double j_rl_sq = j_rl * j_rl;
        for (std::size_t k = 0; k < gridding_n_real[2]; k++) {
          double k_rl = (double(k) - double(gridding_n_real[2] / 2.0)) * rl_grid_spacing;
          double k_rl_sq = k_rl * k_rl;
          double reciprocal_length_sq = (i_rl_sq + j_rl_sq + k_rl_sq);
          // skip voxels beyond the resolution limit
          if (reciprocal_length_sq > one_over_d_sq_min) {
            continue;
          }
          vec3<double> pstar0(i_rl, j_rl, k_rl);
          // Try to calculate the diffracting rotation angles; voxels that can
          // never diffract (no real solution) are skipped
          vec2<double> phi;
          try {
            phi = calculate_rotation_angles_(pstar0);
          } catch (error const&) {
            continue;
          }
          // Check that the angles are within the rotation range
          if (are_angles_in_range(angle_ranges, phi)) {
            double T;
            if (b_iso != 0) {
              T = std::exp(-b_iso * reciprocal_length_sq / 4);
            } else {
              T = 1;
            }
            data(i, j, k) = T;
          }
        }
      }
    }
  }

  /* Peak-finding algorithm inspired by the CLEAN algorithm of
     Högbom, J. A. 1974, A&AS, 15, 417.
     See also: https://doi.org/10.1051/0004-6361/200912148
   */
  // Iteratively finds n_peaks maxima in dirty_map; after each peak the
  // dirty_beam, shifted (with periodic wrap-around) onto the peak position
  // and scaled by gamma * peak / max(beam), is subtracted in place.
  // Returns the grid coordinates of the peaks in the order found.
  af::shared<vec3<int> > clean_3d(
    af::const_ref<double, af::c_grid<3> > const& dirty_beam,
    af::ref<double, af::c_grid<3> > const& dirty_map,
    std::size_t n_peaks,
    double gamma = 1) {
    af::shared<vec3<int> > peaks;
    typedef af::c_grid<3>::index_type index_t;
    index_t const gridding_n_real = index_t(dirty_map.accessor());
    DIALS_ASSERT(dirty_map.size() == dirty_beam.size());
    double max_db = af::max(dirty_beam);
    af::c_grid<3> accessor(dirty_map.accessor());
    // index_type conversion
    // NOTE(review): grid axis 0 is named "height" and axis 2 "width" here,
    // yet the outer i-loop below runs over [0, width) while wrapping i_db
    // against width and indexing with height_depth strides.  This only works
    // if all three grid dimensions are equal — TODO confirm with callers.
    const int height = int(gridding_n_real[0]);
    const int depth = int(gridding_n_real[1]);
    const int width = int(gridding_n_real[2]);
    const long height_depth = height * depth;
    int max_idx = af::max_index(dirty_map);
    for (std::size_t i_peak = 0; i_peak < n_peaks; i_peak++) {
      // Find the maximum value in the map - this is the next "peak"
      const index_t shift = accessor.index_nd(max_idx);
      peaks.push_back(vec3<int>(shift));
      // reposition the dirty beam on the current peak and subtract from
      // the dirty map
      const double max_value = dirty_map[max_idx];
      const double scale = max_value / max_db * gamma;
      max_idx = 0;  // reset for next cycle
      // NOTE(review): the running-max update below reads dirty_map[max_idx]
      // and max_idx outside the critical section while other threads write
      // both — a data race; the max found is best-effort, not exact.
      // TODO: replace with a per-thread max + reduction.
#pragma omp parallel for
      for (int i = 0; i < width; i++) {
        // wrap the beam offset periodically into [0, width)
        int i_db = i - shift[0];
        if (i_db < 0) {
          i_db += width;
        } else if (i_db >= width) {
          i_db -= width;
        }
        // DIALS_ASSERT(i_db >= 0 && i_db < width);
        const long ipart_dm = i * height_depth;
        const long ipart_db = i_db * height_depth;
        for (int j = 0; j < height; j++) {
          int j_db = j - shift[1];
          if (j_db < 0) {
            j_db += height;
          } else if (j_db >= height) {
            j_db -= height;
          }
          // DIALS_ASSERT(j_db >= 0 && j_db < height);
          const long ijpart_dm = ipart_dm + j * depth;
          const long ijpart_db = ipart_db + j_db * depth;
          for (int k = 0; k < depth; k++) {
            int k_db = k - shift[2];
            if (k_db < 0) {
              k_db += depth;
            } else if (k_db >= depth) {
              k_db -= depth;
            }
            // DIALS_ASSERT(k_db >= 0 && k_db < depth);
            const long idx_dm = ijpart_dm + k;
            const long idx_db = ijpart_db + k_db;
            dirty_map[idx_dm] -= dirty_beam[idx_db] * scale;
            // track the new global maximum for the next peak
            if (dirty_map[max_idx] < dirty_map[idx_dm])
#pragma omp critical(max_idx)
            {
              max_idx = idx_dm;
            }
          }
        }
      }
    }
    return peaks;
  }

  // Rasterise selected reciprocal-space vectors onto a cubic n_points^3
  // grid: each surviving vector sets its nearest voxel to the
  // Debye-Waller-style factor exp(-b_iso*|v|^2/4) (or 1 if b_iso == 0).
  // Vectors beyond the d_min resolution limit or falling outside the grid
  // are deselected in place via the selection mask.
  void map_centroids_to_reciprocal_space_grid(
    af::ref<double, af::c_grid<3> > const& grid,
    af::const_ref<vec3<double> > const& reciprocal_space_vectors,
    af::ref<bool> const& selection,
    double d_min,
    double b_iso = 0) {
    typedef af::c_grid<3>::index_type index_t;
    index_t const gridding_n_real = index_t(grid.accessor());
    DIALS_ASSERT(d_min >= 0);
    DIALS_ASSERT(gridding_n_real[0] == gridding_n_real[1]);
    DIALS_ASSERT(gridding_n_real[0] == gridding_n_real[2]);
    const int n_points = gridding_n_real[0];
    const double rlgrid = 2 / (d_min * n_points);
    const double one_over_rlgrid = 1 / rlgrid;
    const int half_n_points = n_points / 2;
    for (int i = 0; i < reciprocal_space_vectors.size(); i++) {
      if (!selection[i]) {
        continue;
      }
      const vec3<double> v = reciprocal_space_vectors[i];
      const double v_length = v.length();
      const double d_spacing = 1 / v_length;
      // deselect vectors beyond the resolution limit
      if (d_spacing < d_min) {
        selection[i] = false;
        continue;
      }
      // round to the nearest voxel, centred on the grid midpoint
      vec3<int> coord;
      for (int j = 0; j < 3; j++) {
        coord[j] = scitbx::math::iround(v[j] * one_over_rlgrid) + half_n_points;
      }
      // deselect vectors that map outside the grid
      if ((coord.max() >= n_points) || coord.min() < 0) {
        selection[i] = false;
        continue;
      }
      double T;
      if (b_iso != 0) {
        T = std::exp(-b_iso * v_length * v_length / 4.0);
      } else {
        T = 1;
      }
      grid(coord) = T;
    }
  }

}}  // namespace dials::algorithms

#endif
GB_binop__isne_fc32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isne_fc32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__isne_fc32) // A.*B function (eWiseMult): GB (_AemultB_03__isne_fc32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_fc32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((node)) // C+=B function (dense accum): GB (_Cdense_accumB__isne_fc32) // C+=b function (dense accum): GB (_Cdense_accumb__isne_fc32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_fc32) // C=scalar+B GB (_bind1st__isne_fc32) // C=scalar+B' GB (_bind1st_tran__isne_fc32) // C=A+scalar GB (_bind2nd__isne_fc32) // C=A'+scalar GB (_bind2nd_tran__isne_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // B,b type: GxB_FC32_t // BinaryOp: cij = GB_FC32_isne (aij, bij) #define GB_ATYPE \ GxB_FC32_t #define GB_BTYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = 
Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ GxB_FC32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_FC32_isne (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNE || GxB_NO_FC32 || GxB_NO_ISNE_FC32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isne_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isne_fc32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isne_fc32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC32_t GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
// (Auto-generated SuiteSparse:GraphBLAS kernels for the ISNE operator on
// GxB_FC32_t.  Function bodies are brought in via #include of template files;
// do not edit by hand.)

// Tail of the preceding kernel (C = A*D, column scale with diagonal D; its
// header starts above this chunk).  It includes GB_AxB_colscale_meta.c as
// its body.
    *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// disabled at generation time (note the placeholder name "(node)")
#if 0
GrB_Info GB ((node))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Element-wise "add" (set union) with the ISNE operator; the per-entry work
// is supplied by GB_add_template.c.
GrB_Info GB (_AaddB__isne_fc32)
(
    GrB_Matrix C, const int C_sparsity,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks, const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace for slicing M, A, and B, freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

// Element-wise "multiply" (set intersection), method 01.
GrB_Info GB (_AemultB_01__isne_fc32)
(
    GrB_Matrix C, const int C_sparsity, const int ewise_method,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks, const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isne_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__isne_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isne_fc32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = isne (x, Bx [p]) for all entries present in the bitmap Bb.
GrB_Info GB (_bind1st__isne_fc32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t   x = (*((GxB_FC32_t *) x_input)) ;
    GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        GxB_FC32_t bij = Bx [p] ;
        Cx [p] = GB_FC32_isne (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = isne (Ax [p], y) for all entries present in the bitmap Ab.
GrB_Info GB (_bind2nd__isne_fc32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
    GxB_FC32_t   y = (*((GxB_FC32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        GxB_FC32_t aij = Ax [p] ;
        Cx [p] = GB_FC32_isne (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC32_t aij = Ax [pA] ;                  \
    Cx [pC] = GB_FC32_isne (x, aij) ;           \
}

GrB_Info GB (_bind1st_tran__isne_fc32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    GxB_FC32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    GxB_FC32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC32_t aij = Ax [pA] ;                  \
    Cx [pC] = GB_FC32_isne (aij, y) ;           \
}

GrB_Info GB (_bind2nd_tran__isne_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
atomic_utilities.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Riccardo Rossi
//                   Denis Demidov
//

#if !defined(KRATOS_ATOMIC_UTILITIES_H_INCLUDED )
#define  KRATOS_ATOMIC_UTILITIES_H_INCLUDED

// System includes

// External includes
#ifdef KRATOS_SMP_OPENMP
#include <omp.h>
#endif

// Project includes
#include "includes/define.h"

namespace Kratos
{
///@addtogroup KratosCore

/**
 * collection of utilities for atomic updates of simple types. (essentially mimics the omp atomic)
 */

/** @param target variable being atomically updated by doing target += value
 * @param value value being added
 */
template<class TDataType>
inline void AtomicAdd(TDataType& target, const TDataType& value )
{
    // when compiled without OpenMP the pragma is ignored and the update is a
    // plain += (no other threading backend is visible here)
    #pragma omp atomic
    target += value;
}

/** @param target vector variable being atomically updated by doing target += value
 * @param value vector value being added
 * Note that the update is not really atomic, but rather is done component by component
 */
template<class TVectorType1, class TVectorType2>
inline void AtomicAdd(TVectorType1& target, const TVectorType2& value )
{
    KRATOS_DEBUG_ERROR_IF(target.size() != value.size()) << "vector size mismatch in vector AtomicAdd- Sizes are: "
        << target.size() << " for target and " << value.size() << " for value " <<std::endl;

    for(unsigned int i=0; i<target.size(); ++i){
        AtomicAdd(target[i], value[i]);
    }
}

/** @param target variable being atomically updated by doing target -= value
 * @param value value being subtracted
 */
// (the original comment here was copied from the vector overload; this is the
//  scalar overload, updated with a single "#pragma omp atomic")
template<class TDataType>
inline void AtomicSub(TDataType& target, const TDataType& value )
{
    #pragma omp atomic
    target -= value;
}

/** @param target vector variable being atomically updated by doing target -= value
 * @param value vector value being subtracted
 * Note that the update is not really atomic, but rather is done component by component
 */
template<class TVectorType1, class TVectorType2>
inline void AtomicSub(TVectorType1& target, const TVectorType2& value )
{
    KRATOS_DEBUG_ERROR_IF(target.size() != value.size()) << "vector size mismatch in vector AtomicSub- Sizes are: "
        << target.size() << " for target and " << value.size() << " for value " <<std::endl;

    for(unsigned int i=0; i<target.size(); ++i){
        AtomicSub(target[i], value[i]);
    }
}

}  // namespace Kratos.

#endif // KRATOS_ATOMIC_UTILITIES_H_INCLUDED  defined
GnatNearestNeighbors.h
//
// Copyright (c) 2009, Markus Rickert
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//

#ifndef RL_MATH_GNATNEARESTNEIGHBORS_H
#define RL_MATH_GNATNEARESTNEIGHBORS_H

#include <algorithm>
#include <iterator>
#include <limits>
#include <random>
#include <type_traits>
#include <utility>
#include <vector>
#include <boost/optional.hpp>

namespace rl
{
    namespace math
    {
        /**
         * Geometric Near-Neighbor Access Tree (GNAT).
         *
         * Sergey Brin. Near neighbor search in large metric spaces. In Proceedings of
         * the International Conference on Very Large Data Bases, pages 574-584,
         * Zurich, Switzerland, September, 1995.
* * http://www.vldb.org/conf/1995/P574.PDF */ template<typename MetricT> class GnatNearestNeighbors { private: struct Node; public: typedef const typename MetricT::Value& const_reference; typedef ::std::ptrdiff_t difference_type; typedef typename MetricT::Value& reference; typedef ::std::size_t size_type; typedef typename MetricT::Value value_type; typedef typename MetricT::Distance Distance; typedef MetricT Metric; typedef typename MetricT::Value Value; typedef ::std::pair<Distance, Value> Neighbor; explicit GnatNearestNeighbors(const Metric& metric) : checks(), generator(::std::random_device()()), metric(metric), nodeDataMax(50), nodeDegree(8), nodeDegreeMax(12), nodeDegreeMin(4), root(0, 0, nodeDegree, nodeDataMax, true), values(0) { } explicit GnatNearestNeighbors(Metric&& metric = Metric()) : checks(), generator(::std::random_device()()), metric(::std::move(metric)), nodeDataMax(50), nodeDegree(8), nodeDegreeMax(12), nodeDegreeMin(4), root(0, 0, nodeDegree, nodeDataMax, true), values(0) { } template<typename InputIterator> GnatNearestNeighbors(InputIterator first, InputIterator last, const Metric& metric) : checks(), generator(::std::random_device()()), metric(metric), nodeDataMax(50), nodeDegree(8), nodeDegreeMax(12), nodeDegreeMin(4), root(first, last, 0, 0, nodeDegree, nodeDataMax, true), values(::std::distance(first, last)) { if (this->root.data.size() > this->nodeDataMax && this->root.data.size() > this->root.degree) { this->split(this->root); } } template<typename InputIterator> GnatNearestNeighbors(InputIterator first, InputIterator last, Metric&& metric = Metric()) : checks(), generator(::std::random_device()()), metric(::std::move(metric)), nodeDataMax(50), nodeDegree(8), nodeDegreeMax(12), nodeDegreeMin(4), root(first, last, nullptr, 0, 0, nodeDegree, nodeDataMax, true), values(::std::distance(first, last)) { if (this->root.data.size() > this->nodeDataMax && this->root.data.size() > this->root.degree) { this->split(this->root); } } 
~GnatNearestNeighbors() { } void clear() { this->root.children.clear(); this->root.children.reserve(this->nodeDegree); this->root.data.clear(); this->root.data.reserve(this->nodeDataMax + 1); this->values = 0; } ::std::vector<Value> data() const { ::std::vector<Value> data; data.reserve(this->values); this->data(this->root, data); return data; } bool empty() const { return this->root.removed && this->root.data.empty() && this->root.children.empty(); } ::boost::optional<::std::size_t> getChecks() const { return this->checks; } ::std::size_t getNodeDataMax() const { return this->nodeDataMax; } ::std::size_t getNodeDegree() const { return this->nodeDegree; } ::std::size_t getNodeDegreeMax() const { return this->nodeDegreeMax; } ::std::size_t getNodeDegreeMin() const { return this->nodeDegreeMin; } template<typename InputIterator> void insert(InputIterator first, InputIterator last) { if (this->empty()) { this->root.data.insert(this->root.data.end(), first, last); if (this->root.data.size() > this->nodeDataMax && this->root.data.size() > this->root.degree) { this->split(this->root); } this->values += ::std::distance(first, last); } else { for (InputIterator i = first; i != last; ++i) { this->push(*i); } } } ::std::vector<Neighbor> nearest(const Value& query, const ::std::size_t& k, const bool& sorted = true) const { return this->search(query, &k, nullptr, sorted); } void push(const Value& value) { this->push(this->root, value); ++this->values; } ::std::vector<Neighbor> radius(const Value& query, const Distance& radius, const bool& sorted = true) const { return this->search(query, nullptr, &radius, sorted); } void seed(const ::std::mt19937::result_type& value) { this->generator.seed(value); } void setChecks(const ::boost::optional<::std::size_t>& checks) { this->checks = checks; } void setNodeDataMax(const ::std::size_t& nodeDataMax) { this->nodeDataMax = nodeDataMax; } void setNodeDegree(const ::std::size_t& nodeDegree) { this->nodeDegree = nodeDegree; } void 
setNodeDegreeMax(const ::std::size_t& nodeDegreeMax) { this->nodeDegreeMax = nodeDegreeMax; } void setNodeDegreeMin(const ::std::size_t& nodeDegreeMin) { this->nodeDegreeMin = nodeDegreeMin; } ::std::size_t size() const { return this->values; } void swap(GnatNearestNeighbors& other) { using ::std::swap; swap(this->generator, other.generator); swap(this->metric, other.metric); swap(this->nodeDegree, other.nodeDegree); swap(this->nodeDegreeMax, other.nodeDegreeMax); swap(this->nodeDegreeMin, other.nodeDegreeMin); swap(this->nodeDataMax, other.nodeDataMax); swap(this->root, other.root); swap(this->values, other.values); } friend void swap(GnatNearestNeighbors& lhs, GnatNearestNeighbors& rhs) { lhs.swap(rhs); } protected: private: typedef ::std::pair<Distance, const Node*> Branch; struct BranchCompare { bool operator()(const Branch& lhs, const Branch& rhs) const { return lhs.first - lhs.second->max[lhs.second->index] > rhs.first - rhs.second->max[rhs.second->index]; } }; struct NeighborCompare { bool operator()(const Neighbor& lhs, const Neighbor& rhs) const { return lhs.first < rhs.first; } }; struct Node { Node(const ::std::size_t& index, const ::std::size_t& siblings, const ::std::size_t& degree, const ::std::size_t& capacity, const bool& removed = false) : children(), data(), degree(degree), index(index), max(siblings + 1, -::std::numeric_limits<Distance>::infinity()), min(siblings + 1, ::std::numeric_limits<Distance>::infinity()), pivot(), removed(removed) { this->children.reserve(degree); this->data.reserve(capacity + 1); } template<typename InputIterator> Node(InputIterator first, InputIterator last, const ::std::size_t& index, const ::std::size_t& siblings, const ::std::size_t& degree, const ::std::size_t& capacity, const bool& removed = false) : children(), data(first, last), degree(degree), index(index), max(siblings + 1, -::std::numeric_limits<Distance>::infinity()), min(siblings + 1, ::std::numeric_limits<Distance>::infinity()), pivot(), removed(removed) { 
            this->children.reserve(degree);
            this->data.reserve(capacity + 1);
        }

        ~Node()
        {
        }

        void swap(Node& other)
        {
            using ::std::swap;
            swap(this->children, other.children);
            swap(this->data, other.data);
            swap(this->degree, other.degree);
            swap(this->index, other.index);
            swap(this->max, other.max);
            swap(this->min, other.min);
            swap(this->pivot, other.pivot);
            swap(this->removed, other.removed);
        }

        friend void swap(Node& lhs, Node& rhs)
        {
            lhs.swap(rhs);
        }

        ::std::vector<Node> children;
        ::std::vector<Value> data;
        ::std::size_t degree;
        // position of this node among its siblings (row into max/min tables)
        ::std::size_t index;
        // max[j]/min[j]: range of distances from sibling pivot j to this
        // node's subtree, used for pruning
        ::std::vector<Distance> max;
        ::std::vector<Distance> min;
        Value pivot;
        bool removed;
    };

    // Greedy farthest-point selection of node.degree pivot candidates;
    // fills 'centers' (indices into node.data) and the pivot-to-point
    // distance table 'distances'.
    void choose(const Node& node, ::std::vector<::std::size_t>& centers, ::std::vector<::std::vector<Distance>>& distances)
    {
        ::std::size_t k = node.degree;

        ::std::vector<Distance> min(node.data.size(), ::std::numeric_limits<Distance>::infinity());
        ::std::uniform_int_distribution<::std::size_t> distribution(0, node.data.size() - 1);
        // first center is picked uniformly at random
        centers[0] = distribution(this->generator);

        for (::std::size_t i = 0; i < k - 1; ++i)
        {
            Distance max = Distance();

            for (::std::size_t j = 0; j < node.data.size(); ++j)
            {
                distances[i][j] = j != centers[i] ? this->metric(node.data[j], node.data[centers[i]]) : 0;
                min[j] = ::std::min(min[j], distances[i][j]);

                // next center: the point farthest from all chosen centers
                if (min[j] > max)
                {
                    max = min[j];
                    centers[i + 1] = j;
                }
            }
        }

        for (::std::size_t j = 0; j < node.data.size(); ++j)
        {
            distances[k - 1][j] = this->metric(node.data[j], node.data[centers[k - 1]]);
        }
    }

    // Recursively append the subtree's values (leaf data and child pivots).
    void data(const Node& node, ::std::vector<Value>& data) const
    {
        data.insert(data.end(), node.data.begin(), node.data.end());

        for (::std::size_t i = 0; i < node.children.size(); ++i)
        {
            data.push_back(node.children[i].pivot);
            this->data(node.children[i], data);
        }
    }

    // Descend to the child with the closest pivot, maintaining the sibling
    // max/min distance ranges along the way; split leaves that overflow.
    void push(Node& node, const Value& value)
    {
        if (node.children.empty())
        {
            node.data.push_back(value);

            if (node.data.size() > this->nodeDataMax && node.data.size() > node.degree)
            {
                this->split(node);
            }
        }
        else
        {
            ::std::vector<Distance> distances(node.children.size());
            ::std::size_t index = 0;
            Distance min = ::std::numeric_limits<Distance>::infinity();

            for (::std::size_t i = 0; i < node.children.size(); ++i)
            {
                distances[i] = this->metric(value, node.children[i].pivot);

                if (distances[i] < min)
                {
                    index = i;
                    min = distances[i];
                }
            }

            for (::std::size_t i = 0; i < node.children.size(); ++i)
            {
                node.children[i].max[index] = ::std::max(node.children[i].max[index], distances[i]);
                node.children[i].min[index] = ::std::min(node.children[i].min[index], distances[i]);
            }

            this->push(node.children[index], value);
        }
    }

    // Driver for both k-NN (k != nullptr) and radius (radius != nullptr)
    // queries; best-first traversal over a heap of candidate branches.
    ::std::vector<Neighbor> search(const Value& query, const ::std::size_t* k, const Distance* radius, const bool& sorted) const
    {
        ::std::vector<Neighbor> neighbors;

        if (this->empty())
        {
            return neighbors;
        }

        if (nullptr != k)
        {
            neighbors.reserve(::std::min(*k, this->size()));
        }

        ::std::size_t checks = 0;
        ::std::vector<Branch> branches;

        this->search(this->root, query, k, radius, branches, neighbors, checks);

        while (!branches.empty() && (!this->checks || checks < this->checks))
        {
            Branch branch = ::std::move(branches.front());
            ::std::pop_heap(branches.begin(), branches.end(), BranchCompare());
            branches.pop_back();

            // re-test the branch: the neighbor set may have shrunk the
            // search radius since the branch was enqueued
            if (nullptr == k || *k == neighbors.size())
            {
                Distance distance = nullptr != radius ? *radius : neighbors.front().first;

                if (branch.first - distance > branch.second->max[branch.second->index] || branch.first + distance < branch.second->min[branch.second->index])
                {
                    continue;
                }
            }

            this->search(*branch.second, query, k, radius, branches, neighbors, checks);
        }

        if (sorted)
        {
            ::std::sort_heap(neighbors.begin(), neighbors.end(), NeighborCompare());
        }

        return neighbors;
    }

    // Visit one node: scan leaf data, or evaluate child pivots, prune
    // siblings via the max/min range tables, and enqueue surviving branches.
    void search(const Node& node, const Value& query, const ::std::size_t* k, const Distance* radius, ::std::vector<Branch>& branches, ::std::vector<Neighbor>& neighbors, ::std::size_t& checks) const
    {
        if (node.children.empty())
        {
            for (::std::size_t i = 0; i < node.data.size(); ++i)
            {
                Distance distance = this->metric(query, node.data[i]);

                if (nullptr == k || neighbors.size() < *k || distance < neighbors.front().first)
                {
                    if (nullptr == radius || distance < *radius)
                    {
                        // heap full: evict the current worst neighbor
                        if (nullptr != k && *k == neighbors.size())
                        {
                            ::std::pop_heap(neighbors.begin(), neighbors.end(), NeighborCompare());
                            neighbors.pop_back();
                        }

                        neighbors.emplace_back(::std::piecewise_construct, ::std::forward_as_tuple(distance), ::std::forward_as_tuple(node.data[i]));
                        ::std::push_heap(neighbors.begin(), neighbors.end(), NeighborCompare());
                    }
                }

                if (this->checks && ++checks > this->checks)
                {
                    return;
                }
            }
        }
        else
        {
            ::std::vector<Distance> distances(node.children.size());
            ::std::vector<bool> removed(node.children.size(), false);

            for (::std::size_t i = 0; i < node.children.size(); ++i)
            {
                if (!removed[i])
                {
                    distances[i] = this->metric(query, node.children[i].pivot);

                    if (!node.children[i].removed)
                    {
                        if (nullptr == k || neighbors.size() < *k || distances[i] < neighbors.front().first)
                        {
                            if (nullptr == radius || distances[i] < *radius)
                            {
                                if (nullptr != k && *k == neighbors.size())
                                {
                                    ::std::pop_heap(neighbors.begin(), neighbors.end(), NeighborCompare());
                                    neighbors.pop_back();
                                }

                                neighbors.emplace_back(::std::piecewise_construct, ::std::forward_as_tuple(distances[i]), ::std::forward_as_tuple(node.children[i].pivot));
                                ::std::push_heap(neighbors.begin(), neighbors.end(), NeighborCompare());
                            }
                        }
                    }

                    // prune siblings whose distance range cannot intersect
                    // the current search ball around the query
                    if (nullptr == k || *k == neighbors.size())
                    {
                        Distance distance = nullptr != radius ? *radius : neighbors.front().first;

                        for (::std::size_t j = 0; j < node.children.size(); ++j)
                        {
                            if (i != j && !removed[j])
                            {
                                if (distances[i] - distance > node.children[i].max[j] || distances[i] + distance < node.children[i].min[j])
                                {
                                    removed[j] = true;
                                }
                            }
                        }
                    }

                    if (this->checks && ++checks > this->checks)
                    {
                        return;
                    }
                }
            }

            for (::std::size_t i = 0; i < node.children.size(); ++i)
            {
                if (!removed[i])
                {
                    // NOTE(review): neighbors.front() assumes neighbors is
                    // non-empty when radius == nullptr; appears to hold
                    // because a surviving child pivot is inserted above, but
                    // confirm for trees containing removed pivots.
                    Distance distance = nullptr != radius ? *radius : neighbors.front().first;

                    if (distances[i] - distance <= node.children[i].max[i] && distances[i] + distance >= node.children[i].min[i])
                    {
                        branches.emplace_back(distances[i], &node.children[i]);
                        ::std::push_heap(branches.begin(), branches.end(), BranchCompare());
                    }
                }
            }
        }
    }

    // Turn an overfull leaf into an interior node: choose pivots, assign
    // every point to its closest pivot, record sibling distance ranges, and
    // recursively split overfull children.
    void split(Node& node)
    {
        ::std::vector<::std::vector<Distance>> distances(node.degree, ::std::vector<Distance>(node.data.size()));
        ::std::vector<::std::size_t> centers(node.degree);
        this->choose(node, centers, distances);

        for (::std::size_t i = 0; i < centers.size(); ++i)
        {
            node.children.emplace_back(i, node.degree - 1, this->nodeDegree, this->nodeDataMax);
            node.children[i].pivot = ::std::move(node.data[centers[i]]);
        }

        for (::std::size_t i = 0; i < node.data.size(); ++i)
        {
            // closest pivot for point i
            ::std::size_t index = 0;
            Distance min = ::std::numeric_limits<Distance>::infinity();

            for (::std::size_t j = 0; j < centers.size(); ++j)
            {
                Distance distance = distances[j][i];

                if (distance < min)
                {
                    index = j;
                    min = distance;
                }
            }

            for (::std::size_t j = 0; j < centers.size(); ++j)
            {
                if (i != centers[j])
                {
                    node.children[j].max[index] = ::std::max(node.children[j].max[index], distances[j][i]);
                    node.children[j].min[index] = ::std::min(node.children[j].min[index], distances[j][i]);
                }
            }

            if (i != centers[index])
            {
                node.children[index].data.push_back(::std::move(node.data[i]));
            }
        }

        for (::std::size_t i = 0; i < node.children.size(); ++i)
        {
            // child degree proportional to its share of the data, clamped
            node.children[i].degree = ::std::min(::std::max(this->nodeDegree * node.children[i].data.size() / node.data.size(), this->nodeDegreeMin), this->nodeDegreeMax);

            if (node.children[i].data.empty())
            {
                node.children[i].max[i] = Distance();
                node.children[i].min[i] = Distance();
            }
        }

#ifdef _OPENMP
        ::std::size_t size = node.data.size();
#endif
        node.data.clear();
        node.data.shrink_to_fit();

#ifdef _OPENMP
#pragma omp parallel for if (size > 2 * this->nodeDataMax)
#if _OPENMP < 200805
        // OpenMP < 3.0 requires a signed loop variable
        for (::std::ptrdiff_t i = 0; i < node.children.size(); ++i)
#else
        for (::std::size_t i = 0; i < node.children.size(); ++i)
#endif
#else
        for (::std::size_t i = 0; i < node.children.size(); ++i)
#endif
        {
            if (node.children[i].data.size() > this->nodeDataMax && node.children[i].data.size() > node.children[i].degree)
            {
                this->split(node.children[i]);
            }
        }
    }

    ::boost::optional<::std::size_t> checks;

    ::std::mt19937 generator;

    Metric metric;

    ::std::size_t nodeDataMax;

    ::std::size_t nodeDegree;

    ::std::size_t nodeDegreeMax;

    ::std::size_t nodeDegreeMin;

    Node root;

    ::std::size_t values;
};
}
}

#endif // RL_MATH_GNATNEARESTNEIGHBORS_H
GB_unaryop__identity_int64_int32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int64_int32
// op(A') function:  GB_tran__identity_int64_int32

// C type:   int64_t
// A type:   int32_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: pass the value through unchanged)
#define GB_OP(z, x) \
    z = x ;

// casting (int32_t to int64_t)
#define GB_CASTING(z, x) \
    int64_t z = (int64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Typecast each of the anz entries of Ax into Cx, in parallel.
GrB_Info GB_unop__identity_int64_int32
(
    int64_t *restrict Cx,
    const int32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is supplied by GB_unaryop_transpose.c, driven by
// the GB_* macros defined above.
GrB_Info GB_tran__identity_int64_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
OpenMPClause.h
//===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // /// \file /// This file defines OpenMP AST classes for clauses. /// There are clauses for executable directives, clauses for declarative /// directives and clauses which can be used in both kinds of directives. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H #define LLVM_CLANG_AST_OPENMPCLAUSE_H #include "clang/AST/Decl.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/TrailingObjects.h" #include <cassert> #include <cstddef> #include <iterator> #include <utility> namespace clang { class ASTContext; //===----------------------------------------------------------------------===// // AST classes for clauses. //===----------------------------------------------------------------------===// /// This is a basic class for representing single OpenMP clause. class OMPClause { /// Starting location of the clause (the clause keyword). SourceLocation StartLoc; /// Ending location of the clause. SourceLocation EndLoc; /// Kind of the clause. 
OpenMPClauseKind Kind; protected: OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc) : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {} public: /// Returns the starting location of the clause. SourceLocation getBeginLoc() const { return StartLoc; } /// Returns the ending location of the clause. SourceLocation getEndLoc() const { return EndLoc; } /// Sets the starting location of the clause. void setLocStart(SourceLocation Loc) { StartLoc = Loc; } /// Sets the ending location of the clause. void setLocEnd(SourceLocation Loc) { EndLoc = Loc; } /// Returns kind of OpenMP clause (private, shared, reduction, etc.). OpenMPClauseKind getClauseKind() const { return Kind; } bool isImplicit() const { return StartLoc.isInvalid(); } using child_iterator = StmtIterator; using const_child_iterator = ConstStmtIterator; using child_range = llvm::iterator_range<child_iterator>; using const_child_range = llvm::iterator_range<const_child_iterator>; child_range children(); const_child_range children() const { auto Children = const_cast<OMPClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } /// Get the iterator range for the expressions used in the clauses. Used /// expressions include only the children that must be evaluated at the /// runtime before entering the construct. child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *) { return true; } }; /// Class that handles pre-initialization statement for some clauses, like /// 'shedule', 'firstprivate' etc. class OMPClauseWithPreInit { friend class OMPClauseReader; /// Pre-initialization statement for the clause. Stmt *PreInit = nullptr; /// Region that captures the associated stmt. 
OpenMPDirectiveKind CaptureRegion = llvm::omp::OMPD_unknown; protected: OMPClauseWithPreInit(const OMPClause *This) { assert(get(This) && "get is not tuned for pre-init."); } /// Set pre-initialization statement for the clause. void setPreInitStmt(Stmt *S, OpenMPDirectiveKind ThisRegion = llvm::omp::OMPD_unknown) { PreInit = S; CaptureRegion = ThisRegion; } public: /// Get pre-initialization statement for the clause. const Stmt *getPreInitStmt() const { return PreInit; } /// Get pre-initialization statement for the clause. Stmt *getPreInitStmt() { return PreInit; } /// Get capture region for the stmt in the clause. OpenMPDirectiveKind getCaptureRegion() const { return CaptureRegion; } static OMPClauseWithPreInit *get(OMPClause *C); static const OMPClauseWithPreInit *get(const OMPClause *C); }; /// Class that handles post-update expression for some clauses, like /// 'lastprivate', 'reduction' etc. class OMPClauseWithPostUpdate : public OMPClauseWithPreInit { friend class OMPClauseReader; /// Post-update expression for the clause. Expr *PostUpdate = nullptr; protected: OMPClauseWithPostUpdate(const OMPClause *This) : OMPClauseWithPreInit(This) { assert(get(This) && "get is not tuned for post-update."); } /// Set pre-initialization statement for the clause. void setPostUpdateExpr(Expr *S) { PostUpdate = S; } public: /// Get post-update expression for the clause. const Expr *getPostUpdateExpr() const { return PostUpdate; } /// Get post-update expression for the clause. Expr *getPostUpdateExpr() { return PostUpdate; } static OMPClauseWithPostUpdate *get(OMPClause *C); static const OMPClauseWithPostUpdate *get(const OMPClause *C); }; /// This structure contains most locations needed for by an OMPVarListClause. struct OMPVarListLocTy { /// Starting location of the clause (the clause keyword). SourceLocation StartLoc; /// Location of '('. SourceLocation LParenLoc; /// Ending location of the clause. 
SourceLocation EndLoc; OMPVarListLocTy() = default; OMPVarListLocTy(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : StartLoc(StartLoc), LParenLoc(LParenLoc), EndLoc(EndLoc) {} }; /// This represents clauses with the list of variables like 'private', /// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the /// '#pragma omp ...' directives. template <class T> class OMPVarListClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Number of variables in the list. unsigned NumVars; protected: /// Build a clause with \a N variables /// /// \param K Kind of the clause. /// \param StartLoc Starting location of the clause (the clause keyword). /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {} /// Fetches list of variables associated with this clause. MutableArrayRef<Expr *> getVarRefs() { return MutableArrayRef<Expr *>( static_cast<T *>(this)->template getTrailingObjects<Expr *>(), NumVars); } /// Sets the list of variables for this clause. 
void setVarRefs(ArrayRef<Expr *> VL) {
    assert(VL.size() == NumVars &&
           "Number of variables is not the same as the preallocated buffer");
    std::copy(VL.begin(), VL.end(),
              static_cast<T *>(this)->template getTrailingObjects<Expr *>());
  }

public:
  using varlist_iterator = MutableArrayRef<Expr *>::iterator;
  using varlist_const_iterator = ArrayRef<const Expr *>::iterator;
  using varlist_range = llvm::iterator_range<varlist_iterator>;
  using varlist_const_range = llvm::iterator_range<varlist_const_iterator>;

  unsigned varlist_size() const { return NumVars; }
  bool varlist_empty() const { return NumVars == 0; }

  varlist_range varlists() {
    return varlist_range(varlist_begin(), varlist_end());
  }
  varlist_const_range varlists() const {
    return varlist_const_range(varlist_begin(), varlist_end());
  }

  varlist_iterator varlist_begin() { return getVarRefs().begin(); }
  varlist_iterator varlist_end() { return getVarRefs().end(); }
  varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); }
  varlist_const_iterator varlist_end() const { return getVarRefs().end(); }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Fetches list of all variables in the clause.
  ArrayRef<const Expr *> getVarRefs() const {
    return llvm::makeArrayRef(
        static_cast<const T *>(this)->template getTrailingObjects<Expr *>(),
        NumVars);
  }
};

/// This represents 'allocator' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp allocate(a) allocator(omp_default_mem_alloc)
/// \endcode
/// In this example directive '#pragma omp allocate' has simple 'allocator'
/// clause with the allocator 'omp_default_mem_alloc'.
class OMPAllocatorClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Expression with the allocator.
  Stmt *Allocator = nullptr;

  /// Set allocator.
void setAllocator(Expr *A) { Allocator = A; }

public:
  /// Build 'allocator' clause with the given allocator.
  ///
  /// \param A Allocator.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPAllocatorClause(Expr *A, SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc)
      : OMPClause(OMPC_allocator, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Allocator(A) {}

  /// Build an empty clause.
  OMPAllocatorClause()
      : OMPClause(OMPC_allocator, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns allocator.
  Expr *getAllocator() const { return cast_or_null<Expr>(Allocator); }

  child_range children() { return child_range(&Allocator, &Allocator + 1); }

  const_child_range children() const {
    return const_child_range(&Allocator, &Allocator + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_allocator;
  }
};

/// This represents clause 'allocate' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a) allocate(omp_default_mem_alloc :a)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// and clause 'allocate' for the variable 'a'.
class OMPAllocateClause final
    : public OMPVarListClause<OMPAllocateClause>,
      private llvm::TrailingObjects<OMPAllocateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Allocator specified in the clause, or 'nullptr' if the default one is
  /// used.
Expr *Allocator = nullptr;

  /// Position of the ':' delimiter in the clause.
  SourceLocation ColonLoc;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Allocator Allocator expression.
  /// \param ColonLoc Location of ':' delimiter.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPAllocateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                    Expr *Allocator, SourceLocation ColonLoc,
                    SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPAllocateClause>(OMPC_allocate, StartLoc, LParenLoc,
                                            EndLoc, N),
        Allocator(Allocator), ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPAllocateClause(unsigned N)
      : OMPVarListClause<OMPAllocateClause>(OMPC_allocate, SourceLocation(),
                                            SourceLocation(), SourceLocation(),
                                            N) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the allocator expression.
  void setAllocator(Expr *A) { Allocator = A; }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Allocator Allocator expression.
  /// \param ColonLoc Location of ':' delimiter.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPAllocateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation LParenLoc, Expr *Allocator,
                                   SourceLocation ColonLoc,
                                   SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Returns the allocator expression or nullptr, if no allocator is specified.
  Expr *getAllocator() const { return Allocator; }

  /// Returns the location of the ':' delimiter.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
static OMPAllocateClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPAllocateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_allocate;
  }
};

/// This represents 'if' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel if(parallel:a > 5)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'if' clause with
/// condition 'a > 5' and directive name modifier 'parallel'.
class OMPIfClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Condition of the 'if' clause.
  Stmt *Condition = nullptr;

  /// Location of ':' (if any).
  SourceLocation ColonLoc;

  /// Directive name modifier for the clause.
  OpenMPDirectiveKind NameModifier = llvm::omp::OMPD_unknown;

  /// Name modifier location.
  SourceLocation NameModifierLoc;

  /// Set condition.
  void setCondition(Expr *Cond) { Condition = Cond; }

  /// Set directive name modifier for the clause.
  void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; }

  /// Set location of directive name modifier for the clause.
  void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; }

  /// Set location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Build 'if' clause with condition \a Cond.
  ///
  /// \param NameModifier [OpenMP 4.1] Directive name modifier of clause.
  /// \param Cond Condition of the clause.
/// \param HelperCond Helper condition for the clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param NameModifierLoc Location of directive name modifier. /// \param ColonLoc [OpenMP 4.1] Location of ':'. /// \param EndLoc Ending location of the clause. OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond, Stmt *HelperCond, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc) : OMPClause(OMPC_if, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond), ColonLoc(ColonLoc), NameModifier(NameModifier), NameModifierLoc(NameModifierLoc) { setPreInitStmt(HelperCond, CaptureRegion); } /// Build an empty clause. OMPIfClause() : OMPClause(OMPC_if, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } /// Return directive name modifier associated with the clause. OpenMPDirectiveKind getNameModifier() const { return NameModifier; } /// Return the location of directive name modifier. 
SourceLocation getNameModifierLoc() const { return NameModifierLoc; }

  child_range children() { return child_range(&Condition, &Condition + 1); }

  const_child_range children() const {
    return const_child_range(&Condition, &Condition + 1);
  }

  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPIfClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_if;
  }
};

/// This represents 'final' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task final(a > 5)
/// \endcode
/// In this example directive '#pragma omp task' has simple 'final'
/// clause with condition 'a > 5'.
class OMPFinalClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Condition of the 'final' clause.
  Stmt *Condition = nullptr;

  /// Set condition.
  void setCondition(Expr *Cond) { Condition = Cond; }

public:
  /// Build 'final' clause with condition \a Cond.
  ///
  /// \param Cond Condition of the clause.
  /// \param HelperCond Helper condition for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPFinalClause(Expr *Cond, Stmt *HelperCond,
                 OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                 SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_final, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Condition(Cond) {
    setPreInitStmt(HelperCond, CaptureRegion);
  }

  /// Build an empty clause.
  OMPFinalClause()
      : OMPClause(OMPC_final, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns condition.
  Expr *getCondition() const { return cast_or_null<Expr>(Condition); }

  child_range children() { return child_range(&Condition, &Condition + 1); }

  const_child_range children() const {
    return const_child_range(&Condition, &Condition + 1);
  }

  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPFinalClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_final;
  }
};

/// This represents 'num_threads' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel num_threads(6)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'num_threads'
/// clause with number of threads '6'.
class OMPNumThreadsClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number-of-threads expression of the 'num_threads' clause.
  Stmt *NumThreads = nullptr;

  /// Set the number-of-threads expression.
  void setNumThreads(Expr *NThreads) { NumThreads = NThreads; }

public:
  /// Build 'num_threads' clause with condition \a NumThreads.
  ///
  /// \param NumThreads Number of threads for the construct.
  /// \param HelperNumThreads Helper Number of threads for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
OMPNumThreadsClause(Expr *NumThreads, Stmt *HelperNumThreads,
                      OpenMPDirectiveKind CaptureRegion,
                      SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation EndLoc)
      : OMPClause(OMPC_num_threads, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc),
        NumThreads(NumThreads) {
    setPreInitStmt(HelperNumThreads, CaptureRegion);
  }

  /// Build an empty clause.
  OMPNumThreadsClause()
      : OMPClause(OMPC_num_threads, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns number of threads.
  Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); }

  child_range children() { return child_range(&NumThreads, &NumThreads + 1); }

  const_child_range children() const {
    return const_child_range(&NumThreads, &NumThreads + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_threads;
  }
};

/// This represents 'safelen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd safelen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'safelen'
/// with single expression '4'.
/// If the safelen clause is used then no two iterations executed
/// concurrently with SIMD instructions can have a greater distance
/// in the logical iteration space than its value. The parameter of
/// the safelen clause must be a constant positive integer expression.
class OMPSafelenClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Safe iteration space distance.
  Stmt *Safelen = nullptr;

  /// Set safelen.
void setSafelen(Expr *Len) { Safelen = Len; }

public:
  /// Build 'safelen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPSafelenClause(Expr *Len, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Safelen(Len) {}

  /// Build an empty clause.
  explicit OMPSafelenClause()
      : OMPClause(OMPC_safelen, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return safe iteration space distance.
  Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); }

  child_range children() { return child_range(&Safelen, &Safelen + 1); }

  const_child_range children() const {
    return const_child_range(&Safelen, &Safelen + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_safelen;
  }
};

/// This represents 'simdlen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd simdlen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'simdlen'
/// with single expression '4'.
/// If the 'simdlen' clause is used then it specifies the preferred number of
/// iterations to be executed concurrently. The parameter of the 'simdlen'
/// clause must be a constant positive integer expression.
class OMPSimdlenClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Preferred number of iterations to be executed concurrently.
  Stmt *Simdlen = nullptr;

  /// Set simdlen.
void setSimdlen(Expr *Len) { Simdlen = Len; }

public:
  /// Build 'simdlen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPSimdlenClause(Expr *Len, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_simdlen, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Simdlen(Len) {}

  /// Build an empty clause.
  explicit OMPSimdlenClause()
      : OMPClause(OMPC_simdlen, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the preferred number of iterations to be executed concurrently.
  Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); }

  child_range children() { return child_range(&Simdlen, &Simdlen + 1); }

  const_child_range children() const {
    return const_child_range(&Simdlen, &Simdlen + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_simdlen;
  }
};

/// This represents 'collapse' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd collapse(3)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'collapse'
/// with single expression '3'.
/// The parameter must be a constant positive integer expression, it specifies
/// the number of nested loops that should be collapsed into a single iteration
/// space.
class OMPCollapseClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of for-loops.
  Stmt *NumForLoops = nullptr;

  /// Set the number of associated for-loops.
void setNumForLoops(Expr *Num) { NumForLoops = Num; }

public:
  /// Build 'collapse' clause.
  ///
  /// \param Num Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPCollapseClause(Expr *Num, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumForLoops(Num) {}

  /// Build an empty clause.
  explicit OMPCollapseClause()
      : OMPClause(OMPC_collapse, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the number of associated for-loops.
  Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }

  child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }

  const_child_range children() const {
    return const_child_range(&NumForLoops, &NumForLoops + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_collapse;
  }
};

/// This represents 'default' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel default(shared)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'default'
/// clause with kind 'shared'.
class OMPDefaultClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'default' clause.
  OpenMPDefaultClauseKind Kind = OMPC_DEFAULT_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Argument of clause.
void setDefaultKind(OpenMPDefaultClauseKind K) { Kind = K; }

  /// Set argument location.
  ///
  /// \param KLoc Argument location.
  void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// Build 'default' clause with argument \a A ('none' or 'shared').
  ///
  /// \param A Argument of the clause ('none' or 'shared').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDefaultClause(OpenMPDefaultClauseKind A, SourceLocation ALoc,
                   SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPDefaultClause()
      : OMPClause(OMPC_default, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  OpenMPDefaultClauseKind getDefaultKind() const { return Kind; }

  /// Returns location of clause kind.
  SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_default;
  }
};

/// This represents 'proc_bind' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel proc_bind(master)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'proc_bind'
/// clause with kind 'master'.
class OMPProcBindClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'proc_bind' clause.
  OpenMPProcBindClauseKind Kind = OMPC_PROC_BIND_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Kind of clause.
  void setProcBindKind(OpenMPProcBindClauseKind K) { Kind = K; }

  /// Set clause kind location.
  ///
  /// \param KLoc Kind location.
  void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// Build 'proc_bind' clause with argument \a A ('master', 'close' or
  /// 'spread').
  ///
  /// \param A Argument of the clause ('master', 'close' or 'spread').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPProcBindClause(OpenMPProcBindClauseKind A, SourceLocation ALoc,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPProcBindClause()
      : OMPClause(OMPC_proc_bind, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  OpenMPProcBindClauseKind getProcBindKind() const { return Kind; }

  /// Returns location of clause kind.
SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_proc_bind;
  }
};

/// This represents 'unified_address' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires unified_address
/// \endcode
/// In this example directive '#pragma omp requires' has 'unified_address'
/// clause.
class OMPUnifiedAddressClause final : public OMPClause {
public:
  friend class OMPClauseReader;
  /// Build 'unified_address' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_unified_address, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPUnifiedAddressClause()
      : OMPClause(OMPC_unified_address, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_unified_address;
  }
};

/// This represents 'unified_shared_memory' clause in the '#pragma omp requires'
/// directive.
/// /// \code /// #pragma omp requires unified_shared_memory /// \endcode /// In this example directive '#pragma omp requires' has 'unified_shared_memory' /// clause. class OMPUnifiedSharedMemoryClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'unified_shared_memory' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_unified_shared_memory, StartLoc, EndLoc) {} /// Build an empty clause. OMPUnifiedSharedMemoryClause() : OMPClause(OMPC_unified_shared_memory, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_unified_shared_memory; } }; /// This represents 'reverse_offload' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires reverse_offload /// \endcode /// In this example directive '#pragma omp requires' has 'reverse_offload' /// clause. class OMPReverseOffloadClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'reverse_offload' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_reverse_offload, StartLoc, EndLoc) {} /// Build an empty clause. 
OMPReverseOffloadClause()
      : OMPClause(OMPC_reverse_offload, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_reverse_offload;
  }
};

/// This represents 'dynamic_allocators' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires dynamic_allocators
/// \endcode
/// In this example directive '#pragma omp requires' has 'dynamic_allocators'
/// clause.
class OMPDynamicAllocatorsClause final : public OMPClause {
public:
  friend class OMPClauseReader;
  /// Build 'dynamic_allocators' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_dynamic_allocators, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPDynamicAllocatorsClause()
      : OMPClause(OMPC_dynamic_allocators, SourceLocation(), SourceLocation()) {
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_dynamic_allocators;
  }
};

/// This represents 'atomic_default_mem_order' clause in the '#pragma omp
/// requires' directive.
/// /// \code /// #pragma omp requires atomic_default_mem_order(seq_cst) /// \endcode /// In this example directive '#pragma omp requires' has simple /// atomic_default_mem_order' clause with kind 'seq_cst'. class OMPAtomicDefaultMemOrderClause final : public OMPClause { friend class OMPClauseReader; /// Location of '(' SourceLocation LParenLoc; /// A kind of the 'atomic_default_mem_order' clause. OpenMPAtomicDefaultMemOrderClauseKind Kind = OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clause. /// /// \param K Kind of clause. void setAtomicDefaultMemOrderKind(OpenMPAtomicDefaultMemOrderClauseKind K) { Kind = K; } /// Set clause kind location. /// /// \param KLoc Kind location. void setAtomicDefaultMemOrderKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// Build 'atomic_default_mem_order' clause with argument \a A ('seq_cst', /// 'acq_rel' or 'relaxed'). /// /// \param A Argument of the clause ('seq_cst', 'acq_rel' or 'relaxed'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPAtomicDefaultMemOrderClause(OpenMPAtomicDefaultMemOrderClauseKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_atomic_default_mem_order, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPAtomicDefaultMemOrderClause() : OMPClause(OMPC_atomic_default_mem_order, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the locaiton of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. OpenMPAtomicDefaultMemOrderClauseKind getAtomicDefaultMemOrderKind() const { return Kind; } /// Returns location of clause kind. 
  SourceLocation getAtomicDefaultMemOrderKindKwLoc() const { return KindKwLoc; }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_atomic_default_mem_order;
  }
};

/// This represents 'schedule' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp for' has 'schedule' clause with
/// arguments 'static' and '3'.
class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'schedule' clause.
  OpenMPScheduleClauseKind Kind = OMPC_SCHEDULE_unknown;

  /// Modifiers for 'schedule' clause.
  enum {FIRST, SECOND, NUM_MODIFIERS};
  OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS];

  /// Locations of modifiers.
  SourceLocation ModifiersLoc[NUM_MODIFIERS];

  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;

  /// Location of ',' (if any).
  SourceLocation CommaLoc;

  /// Chunk size.
  Expr *ChunkSize = nullptr;

  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
  void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; }

  /// Set the first schedule modifier.
  ///
  /// \param M Schedule modifier.
  void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) {
    Modifiers[FIRST] = M;
  }

  /// Set the second schedule modifier.
  ///
  /// \param M Schedule modifier.
  void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) {
    Modifiers[SECOND] = M;
  }

  /// Set location of the first schedule modifier.
  void setFirstScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[FIRST] = Loc;
  }

  /// Set location of the second schedule modifier.
  void setSecondScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[SECOND] = Loc;
  }

  /// Set schedule modifier: fills the first unset modifier slot, then the
  /// second; asserts if both slots are already occupied.
  /// NOTE(review): the identifier misspells "Modifier"; it is kept as-is
  /// because callers outside this file depend on the exact name.
  ///
  /// \param M Schedule modifier.
  void setScheduleModifer(OpenMPScheduleClauseModifier M) {
    if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown)
      Modifiers[FIRST] = M;
    else {
      assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown);
      Modifiers[SECOND] = M;
    }
  }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }

  /// Set chunk size.
  ///
  /// \param E Chunk size.
  void setChunkSize(Expr *E) { ChunkSize = E; }

public:
  /// Build 'schedule' clause with schedule kind \a Kind and chunk size
  /// expression \a ChunkSize.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Schedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  /// \param M1 The first modifier applied to 'schedule' clause.
  /// \param M1Loc Location of the first modifier.
  /// \param M2 The second modifier applied to 'schedule' clause.
/// \param M2Loc Location of the second modifier OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KLoc, SourceLocation CommaLoc, SourceLocation EndLoc, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, Stmt *HelperChunkSize, OpenMPScheduleClauseModifier M1, SourceLocation M1Loc, OpenMPScheduleClauseModifier M2, SourceLocation M2Loc) : OMPClause(OMPC_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) { setPreInitStmt(HelperChunkSize); Modifiers[FIRST] = M1; Modifiers[SECOND] = M2; ModifiersLoc[FIRST] = M1Loc; ModifiersLoc[SECOND] = M2Loc; } /// Build an empty clause. explicit OMPScheduleClause() : OMPClause(OMPC_schedule, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) { Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown; Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown; } /// Get kind of the clause. OpenMPScheduleClauseKind getScheduleKind() const { return Kind; } /// Get the first modifier of the clause. OpenMPScheduleClauseModifier getFirstScheduleModifier() const { return Modifiers[FIRST]; } /// Get the second modifier of the clause. OpenMPScheduleClauseModifier getSecondScheduleModifier() const { return Modifiers[SECOND]; } /// Get location of '('. SourceLocation getLParenLoc() { return LParenLoc; } /// Get kind location. SourceLocation getScheduleKindLoc() { return KindLoc; } /// Get the first modifier location. SourceLocation getFirstScheduleModifierLoc() const { return ModifiersLoc[FIRST]; } /// Get the second modifier location. SourceLocation getSecondScheduleModifierLoc() const { return ModifiersLoc[SECOND]; } /// Get location of ','. SourceLocation getCommaLoc() { return CommaLoc; } /// Get chunk size. Expr *getChunkSize() { return ChunkSize; } /// Get chunk size. 
const Expr *getChunkSize() const { return ChunkSize; } child_range children() { return child_range(reinterpret_cast<Stmt **>(&ChunkSize), reinterpret_cast<Stmt **>(&ChunkSize) + 1); } const_child_range children() const { auto Children = const_cast<OMPScheduleClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_schedule; } }; /// This represents 'ordered' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for ordered (2) /// \endcode /// In this example directive '#pragma omp for' has 'ordered' clause with /// parameter 2. class OMPOrderedClause final : public OMPClause, private llvm::TrailingObjects<OMPOrderedClause, Expr *> { friend class OMPClauseReader; friend TrailingObjects; /// Location of '('. SourceLocation LParenLoc; /// Number of for-loops. Stmt *NumForLoops = nullptr; /// Real number of loops. unsigned NumberOfLoops = 0; /// Build 'ordered' clause. /// /// \param Num Expression, possibly associated with this clause. /// \param NumLoops Number of loops, associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPOrderedClause(Expr *Num, unsigned NumLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_ordered, StartLoc, EndLoc), LParenLoc(LParenLoc), NumForLoops(Num), NumberOfLoops(NumLoops) {} /// Build an empty clause. explicit OMPOrderedClause(unsigned NumLoops) : OMPClause(OMPC_ordered, SourceLocation(), SourceLocation()), NumberOfLoops(NumLoops) {} /// Set the number of associated for-loops. 
void setNumForLoops(Expr *Num) { NumForLoops = Num; } public: /// Build 'ordered' clause. /// /// \param Num Expression, possibly associated with this clause. /// \param NumLoops Number of loops, associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. static OMPOrderedClause *Create(const ASTContext &C, Expr *Num, unsigned NumLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Build an empty clause. static OMPOrderedClause* CreateEmpty(const ASTContext &C, unsigned NumLoops); /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the number of associated for-loops. Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); } /// Set number of iterations for the specified loop. void setLoopNumIterations(unsigned NumLoop, Expr *NumIterations); /// Get number of iterations for all the loops. ArrayRef<Expr *> getLoopNumIterations() const; /// Set loop counter for the specified loop. void setLoopCounter(unsigned NumLoop, Expr *Counter); /// Get loops counter for the specified loop. Expr *getLoopCounter(unsigned NumLoop); const Expr *getLoopCounter(unsigned NumLoop) const; child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); } const_child_range children() const { return const_child_range(&NumForLoops, &NumForLoops + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_ordered; } }; /// This represents 'nowait' clause in the '#pragma omp ...' directive. 
/// /// \code /// #pragma omp for nowait /// \endcode /// In this example directive '#pragma omp for' has 'nowait' clause. class OMPNowaitClause : public OMPClause { public: /// Build 'nowait' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_nowait, StartLoc, EndLoc) {} /// Build an empty clause. OMPNowaitClause() : OMPClause(OMPC_nowait, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_nowait; } }; /// This represents 'untied' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp task untied /// \endcode /// In this example directive '#pragma omp task' has 'untied' clause. class OMPUntiedClause : public OMPClause { public: /// Build 'untied' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_untied, StartLoc, EndLoc) {} /// Build an empty clause. 
OMPUntiedClause() : OMPClause(OMPC_untied, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_untied; } }; /// This represents 'mergeable' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp task mergeable /// \endcode /// In this example directive '#pragma omp task' has 'mergeable' clause. class OMPMergeableClause : public OMPClause { public: /// Build 'mergeable' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_mergeable, StartLoc, EndLoc) {} /// Build an empty clause. OMPMergeableClause() : OMPClause(OMPC_mergeable, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_mergeable; } }; /// This represents 'read' clause in the '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic read /// \endcode /// In this example directive '#pragma omp atomic' has 'read' clause. class OMPReadClause : public OMPClause { public: /// Build 'read' clause. 
/// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_read, StartLoc, EndLoc) {} /// Build an empty clause. OMPReadClause() : OMPClause(OMPC_read, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_read; } }; /// This represents 'write' clause in the '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic write /// \endcode /// In this example directive '#pragma omp atomic' has 'write' clause. class OMPWriteClause : public OMPClause { public: /// Build 'write' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_write, StartLoc, EndLoc) {} /// Build an empty clause. OMPWriteClause() : OMPClause(OMPC_write, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_write; } }; /// This represents 'update' clause in the '#pragma omp atomic' /// directive. 
/// /// \code /// #pragma omp atomic update /// \endcode /// In this example directive '#pragma omp atomic' has 'update' clause. class OMPUpdateClause : public OMPClause { public: /// Build 'update' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_update, StartLoc, EndLoc) {} /// Build an empty clause. OMPUpdateClause() : OMPClause(OMPC_update, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_update; } }; /// This represents 'capture' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic capture /// \endcode /// In this example directive '#pragma omp atomic' has 'capture' clause. class OMPCaptureClause : public OMPClause { public: /// Build 'capture' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_capture, StartLoc, EndLoc) {} /// Build an empty clause. 
OMPCaptureClause() : OMPClause(OMPC_capture, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_capture; } }; /// This represents 'seq_cst' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic seq_cst /// \endcode /// In this example directive '#pragma omp atomic' has 'seq_cst' clause. class OMPSeqCstClause : public OMPClause { public: /// Build 'seq_cst' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_seq_cst, StartLoc, EndLoc) {} /// Build an empty clause. OMPSeqCstClause() : OMPClause(OMPC_seq_cst, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_seq_cst; } }; /// This represents clause 'private' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel private(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'private' /// with the variables 'a' and 'b'. 
class OMPPrivateClause final : public OMPVarListClause<OMPPrivateClause>, private llvm::TrailingObjects<OMPPrivateClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPPrivateClause>(OMPC_private, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPPrivateClause(unsigned N) : OMPVarListClause<OMPPrivateClause>(OMPC_private, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param PrivateVL List of references to private copies with initializers. static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. 
/// \param N The number of variables. static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N); using private_copies_iterator = MutableArrayRef<Expr *>::iterator; using private_copies_const_iterator = ArrayRef<const Expr *>::iterator; using private_copies_range = llvm::iterator_range<private_copies_iterator>; using private_copies_const_range = llvm::iterator_range<private_copies_const_iterator>; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPPrivateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_private; } }; /// This represents clause 'firstprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp parallel firstprivate(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'firstprivate' /// with the variables 'a' and 'b'. class OMPFirstprivateClause final : public OMPVarListClause<OMPFirstprivateClause>, public OMPClauseWithPreInit, private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. 
/// \param N Number of the variables in the clause. OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPFirstprivateClause>(OMPC_firstprivate, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPreInit(this) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPFirstprivateClause(unsigned N) : OMPVarListClause<OMPFirstprivateClause>( OMPC_firstprivate, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPreInit(this) {} /// Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Sets the list of references to initializer variables for new /// private variables. /// \param VL List of references. void setInits(ArrayRef<Expr *> VL); /// Gets the list of references to initializer variables for new /// private variables. MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the original variables. /// \param PrivateVL List of references to private copies with initializers. /// \param InitVL List of references to auto generated variables used for /// initialization of a single array element. 
Used if firstprivate variable is /// of array type. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. static OMPFirstprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL, ArrayRef<Expr *> InitVL, Stmt *PreInit); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N); using private_copies_iterator = MutableArrayRef<Expr *>::iterator; using private_copies_const_iterator = ArrayRef<const Expr *>::iterator; using private_copies_range = llvm::iterator_range<private_copies_iterator>; using private_copies_const_range = llvm::iterator_range<private_copies_const_iterator>; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } using inits_iterator = MutableArrayRef<Expr *>::iterator; using inits_const_iterator = ArrayRef<const Expr *>::iterator; using inits_range = llvm::iterator_range<inits_iterator>; using inits_const_range = llvm::iterator_range<inits_const_iterator>; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPFirstprivateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), 
reinterpret_cast<Stmt **>(varlist_end())); } const_child_range used_children() const { auto Children = const_cast<OMPFirstprivateClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_firstprivate; } }; /// This represents clause 'lastprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd lastprivate(a,b) /// \endcode /// In this example directive '#pragma omp simd' has clause 'lastprivate' /// with the variables 'a' and 'b'. class OMPLastprivateClause final : public OMPVarListClause<OMPLastprivateClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPLastprivateClause, Expr *> { // There are 4 additional tail-allocated arrays at the end of the class: // 1. Contains list of pseudo variables with the default initialization for // each non-firstprivate variables. Used in codegen for initialization of // lastprivate copies. // 2. List of helper expressions for proper generation of assignment operation // required for lastprivate clause. This list represents private variables // (for arrays, single array element). // 3. List of helper expressions for proper generation of assignment operation // required for lastprivate clause. This list represents original variables // (for arrays, single array element). // 4. List of helper expressions that represents assignment operation: // \code // DstExprs = SrcExprs; // \endcode // Required for proper codegen of final assignment performed by the // lastprivate clause. friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. 
OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPLastprivateClause>(OMPC_lastprivate, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPostUpdate(this) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPLastprivateClause(unsigned N) : OMPVarListClause<OMPLastprivateClause>( OMPC_lastprivate, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPostUpdate(this) {} /// Get the list of helper expressions for initialization of private /// copies for lastprivate variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent private variables (for arrays, single /// array element) in the final assignment statement performed by the /// lastprivate clause. void setSourceExprs(ArrayRef<Expr *> SrcExprs); /// Get the list of helper source expressions. MutableArrayRef<Expr *> getSourceExprs() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getSourceExprs() const { return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent original variables (for arrays, single /// array element) in the final assignment statement performed by the /// lastprivate clause. void setDestinationExprs(ArrayRef<Expr *> DstExprs); /// Get the list of helper destination expressions. 
MutableArrayRef<Expr *> getDestinationExprs() { return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size()); } ArrayRef<const Expr *> getDestinationExprs() const { return llvm::makeArrayRef(getSourceExprs().end(), varlist_size()); } /// Set list of helper assignment expressions, required for proper /// codegen of the clause. These expressions are assignment expressions that /// assign private copy of the variable to original variable. void setAssignmentOps(ArrayRef<Expr *> AssignmentOps); /// Get the list of helper assignment expressions. MutableArrayRef<Expr *> getAssignmentOps() { return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size()); } ArrayRef<const Expr *> getAssignmentOps() const { return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param SrcExprs List of helper expressions for proper generation of /// assignment operation required for lastprivate clause. This list represents /// private variables (for arrays, single array element). /// \param DstExprs List of helper expressions for proper generation of /// assignment operation required for lastprivate clause. This list represents /// original variables (for arrays, single array element). /// \param AssignmentOps List of helper expressions that represents assignment /// operation: /// \code /// DstExprs = SrcExprs; /// \endcode /// Required for proper codegen of final assignment performed by the /// lastprivate clause. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. 
static OMPLastprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N); using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; /// Set list of helper expressions, required for generation of private /// copies of original lastprivate variables. void setPrivateCopies(ArrayRef<Expr *> PrivateCopies); helper_expr_const_range private_copies() const { return helper_expr_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } helper_expr_range private_copies() { return helper_expr_range(getPrivateCopies().begin(), getPrivateCopies().end()); } helper_expr_const_range source_exprs() const { return helper_expr_const_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_range source_exprs() { return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_const_range destination_exprs() const { return helper_expr_const_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_range destination_exprs() { return helper_expr_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_const_range assignment_ops() const { return helper_expr_const_range(getAssignmentOps().begin(), getAssignmentOps().end()); } helper_expr_range assignment_ops() { return helper_expr_range(getAssignmentOps().begin(), getAssignmentOps().end()); } child_range children() { return 
child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPLastprivateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_lastprivate; } }; /// This represents clause 'shared' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel shared(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'shared' /// with the variables 'a' and 'b'. class OMPSharedClause final : public OMPVarListClause<OMPSharedClause>, private llvm::TrailingObjects<OMPSharedClause, Expr *> { friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPSharedClause>(OMPC_shared, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPSharedClause(unsigned N) : OMPVarListClause<OMPSharedClause>(OMPC_shared, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. 
static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPSharedClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_shared; } }; /// This represents clause 'reduction' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp parallel reduction(+:a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'reduction' /// with operator '+' and the variables 'a' and 'b'. class OMPReductionClause final : public OMPVarListClause<OMPReductionClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPReductionClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of ':'. SourceLocation ColonLoc; /// Nested name specifier for C++. NestedNameSpecifierLoc QualifierLoc; /// Name of custom operator. DeclarationNameInfo NameInfo; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param ColonLoc Location of ':'. /// \param N Number of the variables in the clause. 
/// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo) : OMPVarListClause<OMPReductionClause>(OMPC_reduction, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc), QualifierLoc(QualifierLoc), NameInfo(NameInfo) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPReductionClause(unsigned N) : OMPVarListClause<OMPReductionClause>(OMPC_reduction, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPostUpdate(this) {} /// Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } /// Sets the name info for specified reduction identifier. void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; } /// Sets the nested name specifier. void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent private copy of the reduction /// variable. void setPrivates(ArrayRef<Expr *> Privates); /// Get the list of helper privates. MutableArrayRef<Expr *> getPrivates() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivates() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent LHS expression in the final /// reduction expression performed by the reduction clause. void setLHSExprs(ArrayRef<Expr *> LHSExprs); /// Get the list of helper LHS expressions. 
MutableArrayRef<Expr *> getLHSExprs() { return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size()); } ArrayRef<const Expr *> getLHSExprs() const { return llvm::makeArrayRef(getPrivates().end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent RHS expression in the final /// reduction expression performed by the reduction clause. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. void setRHSExprs(ArrayRef<Expr *> RHSExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getRHSExprs() { return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getRHSExprs() const { return llvm::makeArrayRef(getLHSExprs().end(), varlist_size()); } /// Set list of helper reduction expressions, required for proper /// codegen of the clause. These expressions are binary expressions or /// operator/custom reduction call that calculates new value from source /// helper expressions to destination helper expressions. void setReductionOps(ArrayRef<Expr *> ReductionOps); /// Get the list of helper reduction expressions. MutableArrayRef<Expr *> getReductionOps() { return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getReductionOps() const { return llvm::makeArrayRef(getRHSExprs().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL The variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. /// \param Privates List of helper expressions for proper generation of /// private copies. 
/// \param LHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// LHSs of the reduction expressions. /// \param RHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// RHSs of the reduction expressions. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. /// \param ReductionOps List of helper expressions that represent reduction /// expressions: /// \code /// LHSExprs binop RHSExprs; /// operator binop(LHSExpr, RHSExpr); /// <CustomReduction>(LHSExpr, RHSExpr); /// \endcode /// Required for proper codegen of final reduction operation performed by the /// reduction clause. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPReductionClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates, ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPReductionClause *CreateEmpty(const ASTContext &C, unsigned N); /// Gets location of ':' symbol in clause. SourceLocation getColonLoc() const { return ColonLoc; } /// Gets the name info for specified reduction identifier. const DeclarationNameInfo &getNameInfo() const { return NameInfo; } /// Gets the nested name specifier. 
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; } using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; helper_expr_const_range privates() const { return helper_expr_const_range(getPrivates().begin(), getPrivates().end()); } helper_expr_range privates() { return helper_expr_range(getPrivates().begin(), getPrivates().end()); } helper_expr_const_range lhs_exprs() const { return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_range lhs_exprs() { return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_const_range rhs_exprs() const { return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_range rhs_exprs() { return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_const_range reduction_ops() const { return helper_expr_const_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_range reduction_ops() { return helper_expr_range(getReductionOps().begin(), getReductionOps().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPReductionClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range used_children() const { auto Children = const_cast<OMPReductionClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_reduction; } }; /// This 
represents clause 'task_reduction' in the '#pragma omp taskgroup' /// directives. /// /// \code /// #pragma omp taskgroup task_reduction(+:a,b) /// \endcode /// In this example directive '#pragma omp taskgroup' has clause /// 'task_reduction' with operator '+' and the variables 'a' and 'b'. class OMPTaskReductionClause final : public OMPVarListClause<OMPTaskReductionClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPTaskReductionClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of ':'. SourceLocation ColonLoc; /// Nested name specifier for C++. NestedNameSpecifierLoc QualifierLoc; /// Name of custom operator. DeclarationNameInfo NameInfo; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param ColonLoc Location of ':'. /// \param N Number of the variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. OMPTaskReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo) : OMPVarListClause<OMPTaskReductionClause>(OMPC_task_reduction, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc), QualifierLoc(QualifierLoc), NameInfo(NameInfo) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPTaskReductionClause(unsigned N) : OMPVarListClause<OMPTaskReductionClause>( OMPC_task_reduction, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPostUpdate(this) {} /// Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } /// Sets the name info for specified reduction identifier. 
void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; } /// Sets the nested name specifier. void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent private copy of the reduction variable. void setPrivates(ArrayRef<Expr *> Privates); /// Get the list of helper privates. MutableArrayRef<Expr *> getPrivates() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivates() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent LHS expression in the final reduction /// expression performed by the reduction clause. void setLHSExprs(ArrayRef<Expr *> LHSExprs); /// Get the list of helper LHS expressions. MutableArrayRef<Expr *> getLHSExprs() { return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size()); } ArrayRef<const Expr *> getLHSExprs() const { return llvm::makeArrayRef(getPrivates().end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent RHS expression in the final reduction /// expression performed by the reduction clause. Also, variables in these /// expressions are used for proper initialization of reduction copies. void setRHSExprs(ArrayRef<Expr *> RHSExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getRHSExprs() { return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getRHSExprs() const { return llvm::makeArrayRef(getLHSExprs().end(), varlist_size()); } /// Set list of helper reduction expressions, required for proper /// codegen of the clause. 
These expressions are binary expressions or /// operator/custom reduction call that calculates new value from source /// helper expressions to destination helper expressions. void setReductionOps(ArrayRef<Expr *> ReductionOps); /// Get the list of helper reduction expressions. MutableArrayRef<Expr *> getReductionOps() { return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getReductionOps() const { return llvm::makeArrayRef(getRHSExprs().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL The variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. /// \param Privates List of helper expressions for proper generation of /// private copies. /// \param LHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// LHSs of the reduction expressions. /// \param RHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// RHSs of the reduction expressions. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. /// \param ReductionOps List of helper expressions that represent reduction /// expressions: /// \code /// LHSExprs binop RHSExprs; /// operator binop(LHSExpr, RHSExpr); /// <CustomReduction>(LHSExpr, RHSExpr); /// \endcode /// Required for proper codegen of final reduction operation performed by the /// reduction clause. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. 
/// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPTaskReductionClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates, ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPTaskReductionClause *CreateEmpty(const ASTContext &C, unsigned N); /// Gets location of ':' symbol in clause. SourceLocation getColonLoc() const { return ColonLoc; } /// Gets the name info for specified reduction identifier. const DeclarationNameInfo &getNameInfo() const { return NameInfo; } /// Gets the nested name specifier. NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; } using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; helper_expr_const_range privates() const { return helper_expr_const_range(getPrivates().begin(), getPrivates().end()); } helper_expr_range privates() { return helper_expr_range(getPrivates().begin(), getPrivates().end()); } helper_expr_const_range lhs_exprs() const { return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_range lhs_exprs() { return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_const_range rhs_exprs() const { return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_range rhs_exprs() { return helper_expr_range(getRHSExprs().begin(), 
getRHSExprs().end()); } helper_expr_const_range reduction_ops() const { return helper_expr_const_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_range reduction_ops() { return helper_expr_range(getReductionOps().begin(), getReductionOps().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPTaskReductionClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_task_reduction; } }; /// This represents clause 'in_reduction' in the '#pragma omp task' directives. /// /// \code /// #pragma omp task in_reduction(+:a,b) /// \endcode /// In this example directive '#pragma omp task' has clause 'in_reduction' with /// operator '+' and the variables 'a' and 'b'. class OMPInReductionClause final : public OMPVarListClause<OMPInReductionClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPInReductionClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of ':'. SourceLocation ColonLoc; /// Nested name specifier for C++. NestedNameSpecifierLoc QualifierLoc; /// Name of custom operator. DeclarationNameInfo NameInfo; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param ColonLoc Location of ':'. /// \param N Number of the variables in the clause. 
/// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. OMPInReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo) : OMPVarListClause<OMPInReductionClause>(OMPC_in_reduction, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc), QualifierLoc(QualifierLoc), NameInfo(NameInfo) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPInReductionClause(unsigned N) : OMPVarListClause<OMPInReductionClause>( OMPC_in_reduction, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPostUpdate(this) {} /// Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } /// Sets the name info for specified reduction identifier. void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; } /// Sets the nested name specifier. void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent private copy of the reduction variable. void setPrivates(ArrayRef<Expr *> Privates); /// Get the list of helper privates. MutableArrayRef<Expr *> getPrivates() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivates() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent LHS expression in the final reduction /// expression performed by the reduction clause. void setLHSExprs(ArrayRef<Expr *> LHSExprs); /// Get the list of helper LHS expressions. 
MutableArrayRef<Expr *> getLHSExprs() { return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size()); } ArrayRef<const Expr *> getLHSExprs() const { return llvm::makeArrayRef(getPrivates().end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent RHS expression in the final reduction /// expression performed by the reduction clause. Also, variables in these /// expressions are used for proper initialization of reduction copies. void setRHSExprs(ArrayRef<Expr *> RHSExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getRHSExprs() { return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getRHSExprs() const { return llvm::makeArrayRef(getLHSExprs().end(), varlist_size()); } /// Set list of helper reduction expressions, required for proper /// codegen of the clause. These expressions are binary expressions or /// operator/custom reduction call that calculates new value from source /// helper expressions to destination helper expressions. void setReductionOps(ArrayRef<Expr *> ReductionOps); /// Get the list of helper reduction expressions. MutableArrayRef<Expr *> getReductionOps() { return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getReductionOps() const { return llvm::makeArrayRef(getRHSExprs().end(), varlist_size()); } /// Set list of helper reduction taskgroup descriptors. void setTaskgroupDescriptors(ArrayRef<Expr *> ReductionOps); /// Get the list of helper reduction taskgroup descriptors. MutableArrayRef<Expr *> getTaskgroupDescriptors() { return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size()); } ArrayRef<const Expr *> getTaskgroupDescriptors() const { return llvm::makeArrayRef(getReductionOps().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param StartLoc Starting location of the clause. 
/// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL The variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. /// \param Privates List of helper expressions for proper generation of /// private copies. /// \param LHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// LHSs of the reduction expressions. /// \param RHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// RHSs of the reduction expressions. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. /// \param ReductionOps List of helper expressions that represent reduction /// expressions: /// \code /// LHSExprs binop RHSExprs; /// operator binop(LHSExpr, RHSExpr); /// <CustomReduction>(LHSExpr, RHSExpr); /// \endcode /// Required for proper codegen of final reduction operation performed by the /// reduction clause. /// \param TaskgroupDescriptors List of helper taskgroup descriptors for /// corresponding items in parent taskgroup task_reduction clause. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. 
static OMPInReductionClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates, ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> TaskgroupDescriptors, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPInReductionClause *CreateEmpty(const ASTContext &C, unsigned N); /// Gets location of ':' symbol in clause. SourceLocation getColonLoc() const { return ColonLoc; } /// Gets the name info for specified reduction identifier. const DeclarationNameInfo &getNameInfo() const { return NameInfo; } /// Gets the nested name specifier. NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; } using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; helper_expr_const_range privates() const { return helper_expr_const_range(getPrivates().begin(), getPrivates().end()); } helper_expr_range privates() { return helper_expr_range(getPrivates().begin(), getPrivates().end()); } helper_expr_const_range lhs_exprs() const { return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_range lhs_exprs() { return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_const_range rhs_exprs() const { return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_range rhs_exprs() { return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_const_range reduction_ops() const { return 
helper_expr_const_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_range reduction_ops() { return helper_expr_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_const_range taskgroup_descriptors() const { return helper_expr_const_range(getTaskgroupDescriptors().begin(), getTaskgroupDescriptors().end()); } helper_expr_range taskgroup_descriptors() { return helper_expr_range(getTaskgroupDescriptors().begin(), getTaskgroupDescriptors().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPInReductionClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_in_reduction; } }; /// This represents clause 'linear' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd linear(a,b : 2) /// \endcode /// In this example directive '#pragma omp simd' has clause 'linear' /// with variables 'a', 'b' and linear step '2'. class OMPLinearClause final : public OMPVarListClause<OMPLinearClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPLinearClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Modifier of 'linear' clause. OpenMPLinearClauseKind Modifier = OMPC_LINEAR_val; /// Location of linear modifier if any. SourceLocation ModifierLoc; /// Location of ':'. SourceLocation ColonLoc; /// Sets the linear step for clause. void setStep(Expr *Step) { *(getFinals().end()) = Step; } /// Sets the expression to calculate linear step for clause. 
void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; } /// Build 'linear' clause with given number of variables \a NumVars. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param NumVars Number of variables. OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned NumVars) : OMPVarListClause<OMPLinearClause>(OMPC_linear, StartLoc, LParenLoc, EndLoc, NumVars), OMPClauseWithPostUpdate(this), Modifier(Modifier), ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {} /// Build an empty clause. /// /// \param NumVars Number of variables. explicit OMPLinearClause(unsigned NumVars) : OMPVarListClause<OMPLinearClause>(OMPC_linear, SourceLocation(), SourceLocation(), SourceLocation(), NumVars), OMPClauseWithPostUpdate(this) {} /// Gets the list of initial values for linear variables. /// /// There are NumVars expressions with initial values allocated after the /// varlist, they are followed by NumVars update expressions (used to update /// the linear variable's value on current iteration) and they are followed by /// NumVars final expressions (used to calculate the linear variable's /// value after the loop body). 
After these lists, there are 2 helper /// expressions - linear step and a helper to calculate it before the /// loop body (used when the linear step is not constant): /// /// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[]; /// Finals[]; Step; CalcStep; } MutableArrayRef<Expr *> getPrivates() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivates() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(getPrivates().end(), varlist_size()); } /// Sets the list of update expressions for linear variables. MutableArrayRef<Expr *> getUpdates() { return MutableArrayRef<Expr *>(getInits().end(), varlist_size()); } ArrayRef<const Expr *> getUpdates() const { return llvm::makeArrayRef(getInits().end(), varlist_size()); } /// Sets the list of final update expressions for linear variables. MutableArrayRef<Expr *> getFinals() { return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size()); } ArrayRef<const Expr *> getFinals() const { return llvm::makeArrayRef(getUpdates().end(), varlist_size()); } /// Gets the list of used expressions for linear variables. MutableArrayRef<Expr *> getUsedExprs() { return MutableArrayRef<Expr *>(getFinals().end() + 2, varlist_size() + 1); } ArrayRef<const Expr *> getUsedExprs() const { return llvm::makeArrayRef(getFinals().end() + 2, varlist_size() + 1); } /// Sets the list of the copies of original linear variables. /// \param PL List of expressions. void setPrivates(ArrayRef<Expr *> PL); /// Sets the list of the initial values for linear variables. /// \param IL List of expressions. void setInits(ArrayRef<Expr *> IL); public: /// Creates clause with a list of variables \a VL and a linear step /// \a Step. /// /// \param C AST Context. /// \param StartLoc Starting location of the clause. 
/// \param LParenLoc Location of '('. /// \param Modifier Modifier of 'linear' clause. /// \param ModifierLoc Modifier location. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param PL List of private copies of original variables. /// \param IL List of initial values for the variables. /// \param Step Linear step. /// \param CalcStep Calculation of the linear step. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPLinearClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param NumVars Number of variables. static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars); /// Set modifier. void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; } /// Return modifier. OpenMPLinearClauseKind getModifier() const { return Modifier; } /// Set modifier location. void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; } /// Return modifier location. SourceLocation getModifierLoc() const { return ModifierLoc; } /// Sets the location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// Returns the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// Returns linear step. Expr *getStep() { return *(getFinals().end()); } /// Returns linear step. const Expr *getStep() const { return *(getFinals().end()); } /// Returns expression to calculate linear step. 
Expr *getCalcStep() { return *(getFinals().end() + 1); } /// Returns expression to calculate linear step. const Expr *getCalcStep() const { return *(getFinals().end() + 1); } /// Sets the list of update expressions for linear variables. /// \param UL List of expressions. void setUpdates(ArrayRef<Expr *> UL); /// Sets the list of final update expressions for linear variables. /// \param FL List of expressions. void setFinals(ArrayRef<Expr *> FL); /// Sets the list of used expressions for the linear clause. void setUsedExprs(ArrayRef<Expr *> UE); using privates_iterator = MutableArrayRef<Expr *>::iterator; using privates_const_iterator = ArrayRef<const Expr *>::iterator; using privates_range = llvm::iterator_range<privates_iterator>; using privates_const_range = llvm::iterator_range<privates_const_iterator>; privates_range privates() { return privates_range(getPrivates().begin(), getPrivates().end()); } privates_const_range privates() const { return privates_const_range(getPrivates().begin(), getPrivates().end()); } using inits_iterator = MutableArrayRef<Expr *>::iterator; using inits_const_iterator = ArrayRef<const Expr *>::iterator; using inits_range = llvm::iterator_range<inits_iterator>; using inits_const_range = llvm::iterator_range<inits_const_iterator>; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } using updates_iterator = MutableArrayRef<Expr *>::iterator; using updates_const_iterator = ArrayRef<const Expr *>::iterator; using updates_range = llvm::iterator_range<updates_iterator>; using updates_const_range = llvm::iterator_range<updates_const_iterator>; updates_range updates() { return updates_range(getUpdates().begin(), getUpdates().end()); } updates_const_range updates() const { return updates_const_range(getUpdates().begin(), getUpdates().end()); } using finals_iterator = MutableArrayRef<Expr *>::iterator; using 
finals_const_iterator = ArrayRef<const Expr *>::iterator; using finals_range = llvm::iterator_range<finals_iterator>; using finals_const_range = llvm::iterator_range<finals_const_iterator>; finals_range finals() { return finals_range(getFinals().begin(), getFinals().end()); } finals_const_range finals() const { return finals_const_range(getFinals().begin(), getFinals().end()); } using used_expressions_iterator = MutableArrayRef<Expr *>::iterator; using used_expressions_const_iterator = ArrayRef<const Expr *>::iterator; using used_expressions_range = llvm::iterator_range<used_expressions_iterator>; using used_expressions_const_range = llvm::iterator_range<used_expressions_const_iterator>; used_expressions_range used_expressions() { return finals_range(getUsedExprs().begin(), getUsedExprs().end()); } used_expressions_const_range used_expressions() const { return finals_const_range(getUsedExprs().begin(), getUsedExprs().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPLinearClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPLinearClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_linear; } }; /// This represents clause 'aligned' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd aligned(a,b : 8) /// \endcode /// In this example directive '#pragma omp simd' has clause 'aligned' /// with variables 'a', 'b' and alignment '8'. 
class OMPAlignedClause final
    : public OMPVarListClause<OMPAlignedClause>,
      private llvm::TrailingObjects<OMPAlignedClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Sets the alignment for clause.
  // The alignment expression lives in the single trailing slot immediately
  // after the variable list: { Vars[]; Alignment }.
  void setAlignment(Expr *A) { *varlist_end() = A; }

  /// Build 'aligned' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation ColonLoc, SourceLocation EndLoc,
                   unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, StartLoc, LParenLoc,
                                           EndLoc, NumVars),
        ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param NumVars Number of variables.
  explicit OMPAlignedClause(unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, SourceLocation(),
                                           SourceLocation(), SourceLocation(),
                                           NumVars) {}

public:
  /// Creates clause with a list of variables \a VL and alignment \a A.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param A Alignment.
  static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation ColonLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  Expr *A);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of variables.
  static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

  /// Sets the location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Returns the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Returns alignment.
  Expr *getAlignment() { return *varlist_end(); }

  /// Returns alignment.
  const Expr *getAlignment() const { return *varlist_end(); }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPAlignedClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_aligned;
  }
};

/// This represents clause 'copyin' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel copyin(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'copyin'
/// with the variables 'a' and 'b'.
class OMPCopyinClause final
    : public OMPVarListClause<OMPCopyinClause>,
      private llvm::TrailingObjects<OMPCopyinClause, Expr *> {
  // Class has 3 additional tail allocated arrays:
  // 1. List of helper expressions for proper generation of assignment operation
  // required for copyin clause. This list represents sources.
  // 2. List of helper expressions for proper generation of assignment operation
  // required for copyin clause. This list represents destinations.
  // 3. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of propagation of master's thread values of
  // threadprivate variables to local instances of that variables in other
  // implicit threads.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc, unsigned N)
    : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, StartLoc, LParenLoc,
                                        EndLoc, N) {}

/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPCopyinClause(unsigned N)
    : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, SourceLocation(),
                                        SourceLocation(), SourceLocation(),
                                        N) {}

/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent source expression in the final
/// assignment statement performed by the copyin clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);

/// Get the list of helper source expressions.
// Trailing storage layout: { Vars[]; SrcExprs[]; DstExprs[];
// AssignmentOps[] }, each segment varlist_size() elements long.
MutableArrayRef<Expr *> getSourceExprs() {
  return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
  return llvm::makeArrayRef(varlist_end(), varlist_size());
}

/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent destination expression in the final
/// assignment statement performed by the copyin clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);

/// Get the list of helper destination expressions.
MutableArrayRef<Expr *> getDestinationExprs() {
  return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
  return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}

/// Set list of helper assignment expressions, required for proper
/// codegen of the clause. These expressions are assignment expressions that
/// assign source helper expressions to destination helper expressions
/// correspondingly.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

/// Get the list of helper assignment expressions.
MutableArrayRef<Expr *> getAssignmentOps() {
  return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
  return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}

public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param SrcExprs List of helper expressions for proper generation of
/// assignment operation required for copyin clause. This list represents
/// sources.
/// \param DstExprs List of helper expressions for proper generation of
/// assignment operation required for copyin clause. This list represents
/// destinations.
/// \param AssignmentOps List of helper expressions that represents assignment
/// operation:
/// \code
/// DstExprs = SrcExprs;
/// \endcode
/// Required for proper codegen of propagation of master's thread values of
/// threadprivate variables to local instances of that variables in other
/// implicit threads.
static OMPCopyinClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
       SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
       ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N);

using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
    llvm::iterator_range<helper_expr_const_iterator>;

helper_expr_const_range source_exprs() const {
  return helper_expr_const_range(getSourceExprs().begin(),
                                 getSourceExprs().end());
}

helper_expr_range source_exprs() {
  return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}

helper_expr_const_range destination_exprs() const {
  return helper_expr_const_range(getDestinationExprs().begin(),
                                 getDestinationExprs().end());
}

helper_expr_range destination_exprs() {
  return helper_expr_range(getDestinationExprs().begin(),
                           getDestinationExprs().end());
}

helper_expr_const_range assignment_ops() const {
  return helper_expr_const_range(getAssignmentOps().begin(),
                                 getAssignmentOps().end());
}

helper_expr_range assignment_ops() {
  return helper_expr_range(getAssignmentOps().begin(),
                           getAssignmentOps().end());
}

child_range children() {
  return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
}

const_child_range children() const {
  auto Children = const_cast<OMPCopyinClause *>(this)->children();
  return const_child_range(Children.begin(), Children.end());
}

child_range used_children() {
  return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
  return const_child_range(const_child_iterator(), const_child_iterator());
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_copyin;
}
};

/// This represents clause 'copyprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp single copyprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp single' has clause 'copyprivate'
/// with the variables 'a' and 'b'.
class OMPCopyprivateClause final
    : public OMPVarListClause<OMPCopyprivateClause>,
      private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(OMPC_copyprivate, StartLoc,
                                               LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPCopyprivateClause(unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(
            OMPC_copyprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N) {}

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions.
  // Trailing storage layout: { Vars[]; SrcExprs[]; DstExprs[];
  // AssignmentOps[] }, each segment varlist_size() elements long.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list
  /// represents sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list
  /// represents destinations.
  /// \param AssignmentOps List of helper expressions that represents
  /// assignment operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// copyprivate clause.
  static OMPCopyprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc,
         SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs,
         ArrayRef<Expr *> AssignmentOps);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }

  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(),
                             getSourceExprs().end());
  }

  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }

  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }

  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }

  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPCopyprivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_copyprivate;
  }
};

/// This represents implicit clause 'flush' for the '#pragma omp flush'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// flush' directive. This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has implicit clause 'flush'
/// with the variables 'a' and 'b'.
class OMPFlushClause final
    : public OMPVarListClause<OMPFlushClause>,
      private llvm::TrailingObjects<OMPFlushClause, Expr *> {
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPFlushClause>(OMPC_flush, StartLoc, LParenLoc,
                                         EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPFlushClause(unsigned N)
      : OMPVarListClause<OMPFlushClause>(OMPC_flush, SourceLocation(),
                                         SourceLocation(), SourceLocation(),
                                         N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                SourceLocation LParenLoc,
                                SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFlushClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_flush;
  }
};

/// This represents implicit clause 'depend' for the '#pragma omp task'
/// directive.
///
/// \code
/// #pragma omp task depend(in:a,b)
/// \endcode
/// In this example directive '#pragma omp task' with clause 'depend' with the
/// variables 'a' and 'b' with dependency 'in'.
class OMPDependClause final
    : public OMPVarListClause<OMPDependClause>,
      private llvm::TrailingObjects<OMPDependClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Dependency type (one of in, out, inout).
  OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;

  /// Dependency type location.
  SourceLocation DepLoc;

  /// Colon location.
  SourceLocation ColonLoc;

  /// Number of loops, associated with the depend clause.
  unsigned NumLoops = 0;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N, unsigned NumLoops)
      : OMPVarListClause<OMPDependClause>(OMPC_depend, StartLoc, LParenLoc,
                                          EndLoc, N),
        NumLoops(NumLoops) {}

  /// Build an empty clause.
///
/// \param N Number of variables.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
explicit OMPDependClause(unsigned N, unsigned NumLoops)
    : OMPVarListClause<OMPDependClause>(OMPC_depend, SourceLocation(),
                                        SourceLocation(), SourceLocation(),
                                        N),
      NumLoops(NumLoops) {}

/// Set dependency kind.
void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; }

/// Set dependency type location.
void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; }

/// Set colon location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param DepKind Dependency type.
/// \param DepLoc Location of the dependency type.
/// \param ColonLoc Colon location.
/// \param VL List of references to the variables.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
static OMPDependClause *Create(const ASTContext &C, SourceLocation StartLoc,
                               SourceLocation LParenLoc,
                               SourceLocation EndLoc,
                               OpenMPDependClauseKind DepKind,
                               SourceLocation DepLoc, SourceLocation ColonLoc,
                               ArrayRef<Expr *> VL, unsigned NumLoops);

/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N,
                                    unsigned NumLoops);

/// Get dependency type.
OpenMPDependClauseKind getDependencyKind() const { return DepKind; }

/// Get dependency type location.
SourceLocation getDependencyLoc() const { return DepLoc; }

/// Get colon location.
SourceLocation getColonLoc() const { return ColonLoc; }

/// Get number of loops associated with the clause.
unsigned getNumLoops() const { return NumLoops; }

/// Set the loop data for the depend clauses with 'sink|source' kind of
/// dependency.
void setLoopData(unsigned NumLoop, Expr *Cnt);

/// Get the loop data.
Expr *getLoopData(unsigned NumLoop);
const Expr *getLoopData(unsigned NumLoop) const;

child_range children() {
  return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
}

const_child_range children() const {
  auto Children = const_cast<OMPDependClause *>(this)->children();
  return const_child_range(Children.begin(), Children.end());
}

child_range used_children() {
  return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
  return const_child_range(const_child_iterator(), const_child_iterator());
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_depend;
}
};

/// This represents 'device' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp target device(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'device'
/// with single expression 'a'.
class OMPDeviceClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Device number.
  Stmt *Device = nullptr;

  /// Set the device number.
  ///
  /// \param E Device number.
  void setDevice(Expr *E) { Device = E; }

public:
  /// Build 'device' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
OMPDeviceClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
                SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc)
    : OMPClause(OMPC_device, StartLoc, EndLoc), OMPClauseWithPreInit(this),
      LParenLoc(LParenLoc), Device(E) {
  setPreInitStmt(HelperE, CaptureRegion);
}

/// Build an empty clause.
OMPDeviceClause()
    : OMPClause(OMPC_device, SourceLocation(), SourceLocation()),
      OMPClauseWithPreInit(this) {}

/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }

/// Return device number.
Expr *getDevice() { return cast<Expr>(Device); }

/// Return device number.
// NOTE(review): this const overload returns a non-const Expr *, so callers
// can mutate the device expression through a const clause; consider
// returning 'const Expr *' -- kept as-is to preserve the interface.
Expr *getDevice() const { return cast<Expr>(Device); }

child_range children() { return child_range(&Device, &Device + 1); }

const_child_range children() const {
  return const_child_range(&Device, &Device + 1);
}

child_range used_children() {
  return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
  return const_child_range(const_child_iterator(), const_child_iterator());
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_device;
}
};

/// This represents 'threads' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered threads
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'threads' clause.
class OMPThreadsClause : public OMPClause {
public:
  /// Build 'threads' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_threads, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPThreadsClause()
      : OMPClause(OMPC_threads, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_threads;
  }
};

/// This represents 'simd' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered simd
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'simd' clause.
class OMPSIMDClause : public OMPClause {
public:
  /// Build 'simd' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_simd, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPSIMDClause() : OMPClause(OMPC_simd, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_simd;
  }
};

/// Struct that defines common infrastructure to handle mappable
/// expressions used in OpenMP clauses.
class OMPClauseMappableExprCommon {
public:
  /// Class that represents a component of a mappable expression. E.g.
  /// for an expression S.a, the first component is a declaration reference
  /// expression associated with 'S' and the second is a member expression
  /// associated with the field declaration 'a'. If the expression is an array
  /// subscript it may not have any associated declaration. In that case the
  /// associated declaration is set to nullptr.
  class MappableComponent {
    /// Expression associated with the component.
    Expr *AssociatedExpression = nullptr;

    /// Declaration associated with the declaration. If the component does
    /// not have a declaration (e.g. array subscripts or section), this is set
    /// to nullptr.
    ValueDecl *AssociatedDeclaration = nullptr;

  public:
    explicit MappableComponent() = default;
    // Always stores the canonical declaration so that components referring to
    // the same entity compare equal.
    explicit MappableComponent(Expr *AssociatedExpression,
                               ValueDecl *AssociatedDeclaration)
        : AssociatedExpression(AssociatedExpression),
          AssociatedDeclaration(
              AssociatedDeclaration
                  ? cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl())
                  : nullptr) {}

    Expr *getAssociatedExpression() const { return AssociatedExpression; }

    ValueDecl *getAssociatedDeclaration() const {
      return AssociatedDeclaration;
    }
  };

  // List of components of an expression. This first one is the whole
  // expression and the last one is the base expression.
  using MappableExprComponentList = SmallVector<MappableComponent, 8>;
  using MappableExprComponentListRef = ArrayRef<MappableComponent>;

  // List of all component lists associated to the same base declaration.
  // E.g. if both 'S.a' and 'S.b' are a mappable expressions, each will have
  // their component list but the same base declaration 'S'.
  using MappableExprComponentLists = SmallVector<MappableExprComponentList, 8>;
  using MappableExprComponentListsRef = ArrayRef<MappableExprComponentList>;

protected:
  // Return the total number of elements in a list of component lists.
  static unsigned
  getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists);

  // Return the total number of elements in a list of declarations. All
  // declarations are expected to be canonical.
  static unsigned
  getUniqueDeclarationsTotalNumber(ArrayRef<const ValueDecl *> Declarations);
};

/// This structure contains all sizes needed for by an
/// OMPMappableExprListClause.
struct OMPMappableExprListSizeTy {
  /// Number of expressions listed.
  unsigned NumVars;
  /// Number of unique base declarations.
  unsigned NumUniqueDeclarations;
  /// Number of component lists.
  unsigned NumComponentLists;
  /// Total number of expression components.
  unsigned NumComponents;
  OMPMappableExprListSizeTy() = default;
  OMPMappableExprListSizeTy(unsigned NumVars, unsigned NumUniqueDeclarations,
                            unsigned NumComponentLists, unsigned NumComponents)
      : NumVars(NumVars), NumUniqueDeclarations(NumUniqueDeclarations),
        NumComponentLists(NumComponentLists), NumComponents(NumComponents) {}
};

/// This represents clauses with a list of expressions that are mappable.
/// Examples of these clauses are 'map' in
/// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and
/// 'from' in '#pragma omp target update...' directives.
template <class T>
class OMPMappableExprListClause : public OMPVarListClause<T>,
                                  public OMPClauseMappableExprCommon {
  friend class OMPClauseReader;

  /// Number of unique declarations in this clause.
  unsigned NumUniqueDeclarations;

  /// Number of component lists in this clause.
  unsigned NumComponentLists;

  /// Total number of components in this clause.
  unsigned NumComponents;

  /// C++ nested name specifier for the associated user-defined mapper.
  NestedNameSpecifierLoc MapperQualifierLoc;

  /// The associated user-defined mapper identifier information.
  DeclarationNameInfo MapperIdInfo;

protected:
  /// Build a clause for \a NumUniqueDeclarations declarations, \a
  /// NumComponentLists total component lists, and \a NumComponents total
  /// components.
  ///
  /// \param K Kind of the clause.
  /// \param Locs Locations needed to build a mappable clause.
  /// It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes
  /// 1) NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  /// \param MapperQualifierLocPtr C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfoPtr The identifier of associated user-defined mapper.
  OMPMappableExprListClause(
      OpenMPClauseKind K, const OMPVarListLocTy &Locs,
      const OMPMappableExprListSizeTy &Sizes,
      NestedNameSpecifierLoc *MapperQualifierLocPtr = nullptr,
      DeclarationNameInfo *MapperIdInfoPtr = nullptr)
      : OMPVarListClause<T>(K, Locs.StartLoc, Locs.LParenLoc, Locs.EndLoc,
                            Sizes.NumVars),
        NumUniqueDeclarations(Sizes.NumUniqueDeclarations),
        NumComponentLists(Sizes.NumComponentLists),
        NumComponents(Sizes.NumComponents) {
    // Mapper info is optional; only copy it when the caller provided it.
    if (MapperQualifierLocPtr)
      MapperQualifierLoc = *MapperQualifierLocPtr;
    if (MapperIdInfoPtr)
      MapperIdInfo = *MapperIdInfoPtr;
  }

  /// Get the unique declarations that are in the trailing objects of the
  /// class.
  MutableArrayRef<ValueDecl *> getUniqueDeclsRef() {
    return MutableArrayRef<ValueDecl *>(
        static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(),
        NumUniqueDeclarations);
  }

  /// Get the unique declarations that are in the trailing objects of the
  /// class.
  ArrayRef<ValueDecl *> getUniqueDeclsRef() const {
    return ArrayRef<ValueDecl *>(
        static_cast<const T *>(this)
            ->template getTrailingObjects<ValueDecl *>(),
        NumUniqueDeclarations);
  }

  /// Set the unique declarations that are in the trailing objects of the
  /// class.
  void setUniqueDecls(ArrayRef<ValueDecl *> UDs) {
    assert(UDs.size() == NumUniqueDeclarations &&
           "Unexpected amount of unique declarations.");
    std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin());
  }

  /// Get the number of lists per declaration that are in the trailing
  /// objects of the class.
  MutableArrayRef<unsigned> getDeclNumListsRef() {
    return MutableArrayRef<unsigned>(
        static_cast<T *>(this)->template getTrailingObjects<unsigned>(),
        NumUniqueDeclarations);
  }

  /// Get the number of lists per declaration that are in the trailing
  /// objects of the class.
  ArrayRef<unsigned> getDeclNumListsRef() const {
    return ArrayRef<unsigned>(
        static_cast<const T *>(this)->template getTrailingObjects<unsigned>(),
        NumUniqueDeclarations);
  }

  /// Set the number of lists per declaration that are in the trailing
  /// objects of the class.
  void setDeclNumLists(ArrayRef<unsigned> DNLs) {
    assert(DNLs.size() == NumUniqueDeclarations &&
           "Unexpected amount of list numbers.");
    std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin());
  }

  /// Get the cumulative component lists sizes that are in the trailing
  /// objects of the class. They are appended after the number of lists.
  // The single trailing 'unsigned' array is shared: the first
  // NumUniqueDeclarations entries are the per-declaration list counts, the
  // next NumComponentLists entries are the cumulative list sizes.
  MutableArrayRef<unsigned> getComponentListSizesRef() {
    return MutableArrayRef<unsigned>(
        static_cast<T *>(this)->template getTrailingObjects<unsigned>() +
            NumUniqueDeclarations,
        NumComponentLists);
  }

  /// Get the cumulative component lists sizes that are in the trailing
  /// objects of the class. They are appended after the number of lists.
  ArrayRef<unsigned> getComponentListSizesRef() const {
    return ArrayRef<unsigned>(
        static_cast<const T *>(this)->template getTrailingObjects<unsigned>() +
            NumUniqueDeclarations,
        NumComponentLists);
  }

  /// Set the cumulative component lists sizes that are in the trailing
  /// objects of the class.
  void setComponentListSizes(ArrayRef<unsigned> CLSs) {
    assert(CLSs.size() == NumComponentLists &&
           "Unexpected amount of component lists.");
    std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin());
  }

  /// Get the components that are in the trailing objects of the class.
  MutableArrayRef<MappableComponent> getComponentsRef() {
    return MutableArrayRef<MappableComponent>(
        static_cast<T *>(this)
            ->template getTrailingObjects<MappableComponent>(),
        NumComponents);
  }

  /// Get the components that are in the trailing objects of the class.
  ArrayRef<MappableComponent> getComponentsRef() const {
    return ArrayRef<MappableComponent>(
        static_cast<const T *>(this)
            ->template getTrailingObjects<MappableComponent>(),
        NumComponents);
  }

  /// Set the components that are in the trailing objects of the class.
  /// This requires the list sizes so that it can also fill the original
  /// expressions, which are the first component of each list.
  void setComponents(ArrayRef<MappableComponent> Components,
                     ArrayRef<unsigned> CLSs) {
    assert(Components.size() == NumComponents &&
           "Unexpected amount of component lists.");
    assert(CLSs.size() == NumComponentLists &&
           "Unexpected amount of list sizes.");
    std::copy(Components.begin(), Components.end(),
              getComponentsRef().begin());
  }

  /// Fill the clause information from the list of declarations and
  /// associated component lists.
  void setClauseInfo(ArrayRef<ValueDecl *> Declarations,
                     MappableExprComponentListsRef ComponentLists) {
    // Perform some checks to make sure the data sizes are consistent with the
    // information available when the clause was created.
    assert(getUniqueDeclarationsTotalNumber(Declarations) ==
               NumUniqueDeclarations &&
           "Unexpected number of mappable expression info entries!");
    assert(getComponentsTotalNumber(ComponentLists) == NumComponents &&
           "Unexpected total number of components!");
    assert(Declarations.size() == ComponentLists.size() &&
           "Declaration and component lists size is not consistent!");
    assert(Declarations.size() == NumComponentLists &&
           "Unexpected declaration and component lists size!");

    // Organize the components by declaration and retrieve the original
    // expression. Original expressions are always the first component of the
    // mappable component list.
    llvm::MapVector<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>>
        ComponentListMap;
    {
      // Declarations[i] pairs with ComponentLists[i]; walk both in lockstep.
      auto CI = ComponentLists.begin();
      for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE;
           ++DI, ++CI) {
        assert(!CI->empty() && "Invalid component list!");
        ComponentListMap[*DI].push_back(*CI);
      }
    }

    // Iterators of the target storage.
    auto UniqueDeclarations = getUniqueDeclsRef();
    auto UDI = UniqueDeclarations.begin();

    auto DeclNumLists = getDeclNumListsRef();
    auto DNLI = DeclNumLists.begin();

    auto ComponentListSizes = getComponentListSizesRef();
    auto CLSI = ComponentListSizes.begin();

    auto Components = getComponentsRef();
    auto CI = Components.begin();

    // Variable to compute the accumulation of the number of components.
    unsigned PrevSize = 0u;

    // Scan all the declarations and associated component lists.
    for (auto &M : ComponentListMap) {
      // The declaration.
      auto *D = M.first;
      // The component lists.
      auto CL = M.second;

      // Initialize the entry.
      *UDI = D;
      ++UDI;

      *DNLI = CL.size();
      ++DNLI;

      // Obtain the cumulative sizes and concatenate all the components in the
      // reserved storage. The stored sizes are cumulative, so a list's extent
      // is recovered later by subtracting the previous entry.
      for (auto C : CL) {
        // Accumulate with the previous size.
        PrevSize += C.size();

        // Save the size.
        *CLSI = PrevSize;
        ++CLSI;

        // Append components after the current components iterator.
        CI = std::copy(C.begin(), C.end(), CI);
      }
    }
  }

  /// Set the nested name specifier of associated user-defined mapper.
  void setMapperQualifierLoc(NestedNameSpecifierLoc NNSL) {
    MapperQualifierLoc = NNSL;
  }

  /// Set the name of associated user-defined mapper.
  void setMapperIdInfo(DeclarationNameInfo MapperId) {
    MapperIdInfo = MapperId;
  }

  /// Get the user-defined mapper references that are in the trailing objects
  /// of the class. They sit directly after the varlist_size() variable
  /// expressions in the trailing Expr* array.
  MutableArrayRef<Expr *> getUDMapperRefs() {
    return llvm::makeMutableArrayRef<Expr *>(
        static_cast<T *>(this)->template getTrailingObjects<Expr *>() +
            OMPVarListClause<T>::varlist_size(),
        OMPVarListClause<T>::varlist_size());
  }

  /// Get the user-defined mappers references that are in the trailing objects
  /// of the class.
  // NOTE(review): unlike the other const accessors above, this one uses
  // static_cast<T *>(this) rather than static_cast<const T *>(this), which
  // appears to cast away constness from a const member function — confirm
  // this compiles on all supported toolchains / consider the const cast.
  ArrayRef<Expr *> getUDMapperRefs() const {
    return llvm::makeArrayRef<Expr *>(
        static_cast<T *>(this)->template getTrailingObjects<Expr *>() +
            OMPVarListClause<T>::varlist_size(),
        OMPVarListClause<T>::varlist_size());
  }

  /// Set the user-defined mappers that are in the trailing objects of the
  /// class.
  void setUDMapperRefs(ArrayRef<Expr *> DMDs) {
    assert(DMDs.size() == OMPVarListClause<T>::varlist_size() &&
           "Unexpected number of user-defined mappers.");
    std::copy(DMDs.begin(), DMDs.end(), getUDMapperRefs().begin());
  }

public:
  /// Return the number of unique base declarations in this clause.
  unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; }

  /// Return the number of lists derived from the clause expressions.
  unsigned getTotalComponentListNum() const { return NumComponentLists; }

  /// Return the total number of components in all lists derived from the
  /// clause.
  unsigned getTotalComponentsNum() const { return NumComponents; }

  /// Gets the nested name specifier for associated user-defined mapper.
  NestedNameSpecifierLoc getMapperQualifierLoc() const {
    return MapperQualifierLoc;
  }

  /// Gets the name info for associated user-defined mapper.
  const DeclarationNameInfo &getMapperIdInfo() const { return MapperIdInfo; }

  /// Iterator that browse the components by lists. It also allows
  /// browsing components of a single declaration.
  class const_component_lists_iterator
      : public llvm::iterator_adaptor_base<
            const_component_lists_iterator,
            MappableExprComponentListRef::const_iterator,
            std::forward_iterator_tag, MappableComponent, ptrdiff_t,
            MappableComponent, MappableComponent> {
    // The declaration the iterator currently refers to.
    ArrayRef<ValueDecl *>::iterator DeclCur;

    // The list number associated with the current declaration.
    ArrayRef<unsigned>::iterator NumListsCur;

    // Remaining lists for the current declaration.
    unsigned RemainingLists = 0;

    // The cumulative size of the previous list, or zero if there is no
    // previous list.
    unsigned PrevListSize = 0;

    // The cumulative sizes of the current list - it will delimit the remaining
    // range of interest.
    ArrayRef<unsigned>::const_iterator ListSizeCur;
    ArrayRef<unsigned>::const_iterator ListSizeEnd;

    // Iterator to the end of the components storage.
    MappableExprComponentListRef::const_iterator End;

  public:
    /// Construct an iterator that scans all lists.
    explicit const_component_lists_iterator(
        ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum,
        ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components)
        : const_component_lists_iterator::iterator_adaptor_base(
              Components.begin()),
          DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()),
          ListSizeCur(CumulativeListSizes.begin()),
          ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) {
      assert(UniqueDecls.size() == DeclsListNum.size() &&
             "Inconsistent number of declarations and list sizes!");
      if (!DeclsListNum.empty())
        RemainingLists = *NumListsCur;
    }

    /// Construct an iterator that scan lists for a given declaration \a
    /// Declaration.
    explicit const_component_lists_iterator(
        const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls,
        ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components)
        : const_component_lists_iterator(UniqueDecls, DeclsListNum,
                                         CumulativeListSizes, Components) {
      // Look for the desired declaration. While we are looking for it, we
      // update the state so that we know the component where a given list
      // starts.
      for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) {
        if (*DeclCur == Declaration)
          break;

        assert(*NumListsCur > 0 && "No lists associated with declaration??");

        // Skip the lists associated with the current declaration, but save
        // the last list size that was skipped.
        std::advance(ListSizeCur, *NumListsCur - 1);
        PrevListSize = *ListSizeCur;
        ++ListSizeCur;
      }

      // If we didn't find any declaration, advance the iterator to after the
      // last component and set remaining lists to zero.
      if (ListSizeCur == CumulativeListSizes.end()) {
        this->I = End;
        RemainingLists = 0u;
        return;
      }

      // Set the remaining lists with the total number of lists of the current
      // declaration.
      RemainingLists = *NumListsCur;

      // Adjust the list size end iterator to the end of the relevant range.
      ListSizeEnd = ListSizeCur;
      std::advance(ListSizeEnd, RemainingLists);

      // Given that the list sizes are cumulative, the index of the component
      // that start the list is the size of the previous list.
      std::advance(this->I, PrevListSize);
    }

    // Return the array with the current list. The sizes are cumulative, so
    // the array size is the difference between the current size and previous
    // one.
    std::pair<const ValueDecl *, MappableExprComponentListRef>
    operator*() const {
      assert(ListSizeCur != ListSizeEnd && "Invalid iterator!");
      return std::make_pair(
          *DeclCur,
          MappableExprComponentListRef(&*this->I,
                                       *ListSizeCur - PrevListSize));
    }
    std::pair<const ValueDecl *, MappableExprComponentListRef>
    operator->() const {
      return **this;
    }

    // Skip the components of the current list.
    const_component_lists_iterator &operator++() {
      assert(ListSizeCur != ListSizeEnd && RemainingLists &&
             "Invalid iterator!");

      // If we don't have more lists just skip all the components. Otherwise,
      // advance the iterator by the number of components in the current list.
      if (std::next(ListSizeCur) == ListSizeEnd) {
        this->I = End;
        RemainingLists = 0;
      } else {
        std::advance(this->I, *ListSizeCur - PrevListSize);
        PrevListSize = *ListSizeCur;

        // We are done with a declaration, move to the next one.
        if (!(--RemainingLists)) {
          ++DeclCur;
          ++NumListsCur;
          RemainingLists = *NumListsCur;
          assert(RemainingLists && "No lists in the following declaration??");
        }
      }

      ++ListSizeCur;
      return *this;
    }
  };

  using const_component_lists_range =
      llvm::iterator_range<const_component_lists_iterator>;

  /// Iterators for all component lists.
  const_component_lists_iterator component_lists_begin() const {
    return const_component_lists_iterator(
        getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(),
        getComponentsRef());
  }
  const_component_lists_iterator component_lists_end() const {
    return const_component_lists_iterator(
        ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(),
        MappableExprComponentListRef(getComponentsRef().end(),
                                     getComponentsRef().end()));
  }
  const_component_lists_range component_lists() const {
    return {component_lists_begin(), component_lists_end()};
  }

  /// Iterators for component lists associated with the provided
  /// declaration.
  const_component_lists_iterator
  decl_component_lists_begin(const ValueDecl *VD) const {
    return const_component_lists_iterator(
        VD, getUniqueDeclsRef(), getDeclNumListsRef(),
        getComponentListSizesRef(), getComponentsRef());
  }
  const_component_lists_iterator decl_component_lists_end() const {
    return component_lists_end();
  }
  const_component_lists_range decl_component_lists(const ValueDecl *VD) const {
    return {decl_component_lists_begin(VD), decl_component_lists_end()};
  }

  /// Iterators to access all the declarations, number of lists, list sizes,
  /// and components.
  using const_all_decls_iterator = ArrayRef<ValueDecl *>::iterator;
  using const_all_decls_range =
      llvm::iterator_range<const_all_decls_iterator>;

  const_all_decls_range all_decls() const {
    auto A = getUniqueDeclsRef();
    return const_all_decls_range(A.begin(), A.end());
  }

  using const_all_num_lists_iterator = ArrayRef<unsigned>::iterator;
  using const_all_num_lists_range =
      llvm::iterator_range<const_all_num_lists_iterator>;

  const_all_num_lists_range all_num_lists() const {
    auto A = getDeclNumListsRef();
    return const_all_num_lists_range(A.begin(), A.end());
  }

  using const_all_lists_sizes_iterator = ArrayRef<unsigned>::iterator;
  using const_all_lists_sizes_range =
      llvm::iterator_range<const_all_lists_sizes_iterator>;

  const_all_lists_sizes_range all_lists_sizes() const {
    auto A = getComponentListSizesRef();
    return const_all_lists_sizes_range(A.begin(), A.end());
  }

  using const_all_components_iterator = ArrayRef<MappableComponent>::iterator;
  using const_all_components_range =
      llvm::iterator_range<const_all_components_iterator>;

  const_all_components_range all_components() const {
    auto A = getComponentsRef();
    return const_all_components_range(A.begin(), A.end());
  }

  using mapperlist_iterator = MutableArrayRef<Expr *>::iterator;
  using mapperlist_const_iterator = ArrayRef<const Expr *>::iterator;
  using mapperlist_range = llvm::iterator_range<mapperlist_iterator>;
  using mapperlist_const_range =
      llvm::iterator_range<mapperlist_const_iterator>;

  // Accessors over the user-defined mapper references stored in the trailing
  // Expr* storage (one per listed variable).
  mapperlist_iterator mapperlist_begin() { return getUDMapperRefs().begin(); }
  mapperlist_iterator mapperlist_end() { return getUDMapperRefs().end(); }
  mapperlist_const_iterator mapperlist_begin() const {
    return getUDMapperRefs().begin();
  }
  mapperlist_const_iterator mapperlist_end() const {
    return getUDMapperRefs().end();
  }
  mapperlist_range mapperlists() {
    return mapperlist_range(mapperlist_begin(), mapperlist_end());
  }
  mapperlist_const_range mapperlists() const {
    return mapperlist_const_range(mapperlist_begin(), mapperlist_end());
  }
};

/// This represents clause 'map' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target map(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'map'
/// with the variables 'a' and 'b'.
class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>,
                           private llvm::TrailingObjects<
                               OMPMapClause, Expr *, ValueDecl *, unsigned,
                               OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  // The trailing 'unsigned' array holds both the per-declaration list counts
  // and the cumulative component-list sizes, hence the sum.
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Number of allowed map-type-modifiers.
  static constexpr unsigned NumberOfModifiers =
      OMPC_MAP_MODIFIER_last - OMPC_MAP_MODIFIER_unknown - 1;

private:
  /// Map-type-modifiers for the 'map' clause.
  OpenMPMapModifierKind MapTypeModifiers[NumberOfModifiers] = {
      OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown,
      OMPC_MAP_MODIFIER_unknown};

  /// Location of map-type-modifiers for the 'map' clause.
  SourceLocation MapTypeModifiersLoc[NumberOfModifiers];

  /// Map type for the 'map' clause.
  OpenMPMapClauseKind MapType = OMPC_MAP_unknown;

  /// Is this an implicit map type or not.
  bool MapTypeIsImplicit = false;

  /// Location of the map type.
  SourceLocation MapLoc;

  /// Colon location.
  SourceLocation ColonLoc;

  /// Build a clause for \a NumVars listed expressions, \a
  /// NumUniqueDeclarations declarations, \a NumComponentLists total component
  /// lists, and \a NumComponents total expression components.
  ///
  /// \param MapModifiers Map-type-modifiers.
  /// \param MapModifiersLoc Locations of map-type-modifiers.
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param MapType Map type.
  /// \param MapTypeIsImplicit Map type is inferred implicitly.
  /// \param MapLoc Location of the map type.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes
  /// 1) NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPMapClause(ArrayRef<OpenMPMapModifierKind> MapModifiers,
                        ArrayRef<SourceLocation> MapModifiersLoc,
                        NestedNameSpecifierLoc MapperQualifierLoc,
                        DeclarationNameInfo MapperIdInfo,
                        OpenMPMapClauseKind MapType, bool MapTypeIsImplicit,
                        SourceLocation MapLoc, const OMPVarListLocTy &Locs,
                        const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_map, Locs, Sizes, &MapperQualifierLoc,
                                  &MapperIdInfo),
        MapType(MapType), MapTypeIsImplicit(MapTypeIsImplicit),
        MapLoc(MapLoc) {
    // Callers must provide exactly NumberOfModifiers entries (padded with
    // OMPC_MAP_MODIFIER_unknown where unused).
    assert(llvm::array_lengthof(MapTypeModifiers) == MapModifiers.size() &&
           "Unexpected number of map type modifiers.");
    llvm::copy(MapModifiers, std::begin(MapTypeModifiers));

    assert(llvm::array_lengthof(MapTypeModifiersLoc) ==
               MapModifiersLoc.size() &&
           "Unexpected number of map type modifier locations.");
    llvm::copy(MapModifiersLoc, std::begin(MapTypeModifiersLoc));
  }

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes
  /// 1) NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPMapClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_map, OMPVarListLocTy(), Sizes) {}

  /// Set map-type-modifier for the clause.
  ///
  /// \param I index for map-type-modifier.
  /// \param T map-type-modifier for the clause.
  void setMapTypeModifier(unsigned I, OpenMPMapModifierKind T) {
    assert(I < NumberOfModifiers &&
           "Unexpected index to store map type modifier, exceeds array size.");
    MapTypeModifiers[I] = T;
  }

  /// Set location for the map-type-modifier.
  ///
  /// \param I index for map-type-modifier location.
  /// \param TLoc map-type-modifier location.
  void setMapTypeModifierLoc(unsigned I, SourceLocation TLoc) {
    assert(I < NumberOfModifiers &&
           "Index to store map type modifier location exceeds array size.");
    MapTypeModifiersLoc[I] = TLoc;
  }

  /// Set type for the clause.
  ///
  /// \param T Type for the clause.
  void setMapType(OpenMPMapClauseKind T) { MapType = T; }

  /// Set type location.
  ///
  /// \param TLoc Type location.
  void setMapLoc(SourceLocation TLoc) { MapLoc = TLoc; }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param MapModifiers Map-type-modifiers.
  /// \param MapModifiersLoc Location of map-type-modifiers.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  /// \param Type Map type.
  /// \param TypeIsImplicit Map type is inferred implicitly.
  /// \param TypeLoc Location of the map type.
  static OMPMapClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists,
         ArrayRef<Expr *> UDMapperRefs,
         ArrayRef<OpenMPMapModifierKind> MapModifiers,
         ArrayRef<SourceLocation> MapModifiersLoc,
         NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId,
         OpenMPMapClauseKind Type, bool TypeIsImplicit,
         SourceLocation TypeLoc);

  /// Creates an empty clause with the place for \a NumVars original
  /// expressions, \a NumUniqueDeclarations declarations, \NumComponentLists
  /// lists, and \a NumComponents expression components.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes
  /// 1) NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPMapClause *CreateEmpty(const ASTContext &C,
                                   const OMPMappableExprListSizeTy &Sizes);

  /// Fetches mapping kind for the clause.
  OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; }

  /// Is this an implicit map type?
  /// We have to capture 'IsMapTypeImplicit' from the parser for more
  /// informative error messages. It helps distinguish map(r) from
  /// map(tofrom: r), which is important to print more helpful error
  /// messages for some target directives.
  bool isImplicitMapType() const LLVM_READONLY { return MapTypeIsImplicit; }

  /// Fetches the map-type-modifier at 'Cnt' index of array of modifiers.
  ///
  /// \param Cnt index for map-type-modifier.
  OpenMPMapModifierKind getMapTypeModifier(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfModifiers &&
           "Requested modifier exceeds the total number of modifiers.");
    return MapTypeModifiers[Cnt];
  }

  /// Fetches the map-type-modifier location at 'Cnt' index of array of
  /// modifiers' locations.
  ///
  /// \param Cnt index for map-type-modifier location.
  SourceLocation getMapTypeModifierLoc(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfModifiers &&
           "Requested modifier location exceeds total number of modifiers.");
    return MapTypeModifiersLoc[Cnt];
  }

  /// Fetches ArrayRef of map-type-modifiers.
  ArrayRef<OpenMPMapModifierKind> getMapTypeModifiers() const LLVM_READONLY {
    return llvm::makeArrayRef(MapTypeModifiers);
  }

  /// Fetches ArrayRef of location of map-type-modifiers.
  ArrayRef<SourceLocation> getMapTypeModifiersLoc() const LLVM_READONLY {
    return llvm::makeArrayRef(MapTypeModifiersLoc);
  }

  /// Fetches location of clause mapping kind.
  SourceLocation getMapLoc() const LLVM_READONLY { return MapLoc; }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  // Expose the listed variable expressions (stored as Expr*) as a Stmt**
  // range for generic AST traversal.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPMapClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // Only 'to' and 'tofrom' maps read the listed expressions on entry to the
  // region; other map kinds contribute no used children.
  child_range used_children() {
    if (MapType == OMPC_MAP_to || MapType == OMPC_MAP_tofrom)
      return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                         reinterpret_cast<Stmt **>(varlist_end()));
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    auto Children = const_cast<OMPMapClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_map;
  }
};

/// This represents 'num_teams' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams num_teams(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'num_teams'
/// with single expression 'n'.
class OMPNumTeamsClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// NumTeams number.
  Stmt *NumTeams = nullptr;

  /// Set the NumTeams number.
  ///
  /// \param E NumTeams number.
  void setNumTeams(Expr *E) { NumTeams = E; }

public:
  /// Build 'num_teams' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTeamsClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_num_teams, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), NumTeams(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPNumTeamsClause()
      : OMPClause(OMPC_num_teams, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return NumTeams number.
  Expr *getNumTeams() { return cast<Expr>(NumTeams); }

  /// Return NumTeams number.
  Expr *getNumTeams() const { return cast<Expr>(NumTeams); }

  child_range children() { return child_range(&NumTeams, &NumTeams + 1); }

  const_child_range children() const {
    return const_child_range(&NumTeams, &NumTeams + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_teams;
  }
};

/// This represents 'thread_limit' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams thread_limit(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'thread_limit'
/// with single expression 'n'.
class OMPThreadLimitClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// ThreadLimit number.
  Stmt *ThreadLimit = nullptr;

  /// Set the ThreadLimit number.
  ///
  /// \param E ThreadLimit number.
  void setThreadLimit(Expr *E) { ThreadLimit = E; }

public:
  /// Build 'thread_limit' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPThreadLimitClause(Expr *E, Stmt *HelperE,
                       OpenMPDirectiveKind CaptureRegion,
                       SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc)
      : OMPClause(OMPC_thread_limit, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadLimit(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPThreadLimitClause()
      : OMPClause(OMPC_thread_limit, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return ThreadLimit number.
  Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); }

  /// Return ThreadLimit number.
  Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); }

  child_range children() {
    return child_range(&ThreadLimit, &ThreadLimit + 1);
  }

  const_child_range children() const {
    return const_child_range(&ThreadLimit, &ThreadLimit + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_thread_limit;
  }
};

/// This represents 'priority' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task priority(n)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'priority' with
/// single expression 'n'.
class OMPPriorityClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Priority number.
  Stmt *Priority = nullptr;

  /// Set the Priority number.
  ///
  /// \param E Priority number.
  void setPriority(Expr *E) { Priority = E; }

public:
  /// Build 'priority' clause.
  ///
  /// \param Priority Expression associated with this clause.
  /// \param HelperPriority Helper priority for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPPriorityClause(Expr *Priority, Stmt *HelperPriority,
                    OpenMPDirectiveKind CaptureRegion,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_priority, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Priority(Priority) {
    setPreInitStmt(HelperPriority, CaptureRegion);
  }

  /// Build an empty clause.
  OMPPriorityClause()
      : OMPClause(OMPC_priority, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return Priority number.
  Expr *getPriority() { return cast<Expr>(Priority); }

  /// Return Priority number.
  Expr *getPriority() const { return cast<Expr>(Priority); }

  child_range children() { return child_range(&Priority, &Priority + 1); }

  const_child_range children() const {
    return const_child_range(&Priority, &Priority + 1);
  }

  // Defined out of line (depends on whether the priority expression is
  // actually evaluated for the directive).
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPPriorityClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_priority;
  }
};

/// This represents 'grainsize' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop grainsize(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'grainsize'
/// with single expression '4'.
class OMPGrainsizeClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Grainsize expression of the 'grainsize' clause.
  Stmt *Grainsize = nullptr;

  /// Set the grainsize expression.
  void setGrainsize(Expr *Size) { Grainsize = Size; }

public:
  /// Build 'grainsize' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param HelperSize Helper grainsize for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPGrainsizeClause(Expr *Size, Stmt *HelperSize,
                     OpenMPDirectiveKind CaptureRegion,
                     SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc)
      : OMPClause(OMPC_grainsize, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Grainsize(Size) {
    setPreInitStmt(HelperSize, CaptureRegion);
  }

  /// Build an empty clause.
  explicit OMPGrainsizeClause()
      : OMPClause(OMPC_grainsize, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the grainsize expression (may be null for an empty clause).
  Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); }

  child_range children() { return child_range(&Grainsize, &Grainsize + 1); }

  const_child_range children() const {
    return const_child_range(&Grainsize, &Grainsize + 1);
  }

  // Defined out of line.
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPGrainsizeClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_grainsize;
  }
};

/// This represents 'nogroup' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp taskloop nogroup
/// \endcode
/// In this example directive '#pragma omp taskloop' has 'nogroup' clause.
class OMPNogroupClause : public OMPClause {
public:
  /// Build 'nogroup' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_nogroup, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPNogroupClause()
      : OMPClause(OMPC_nogroup, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_nogroup;
  }
};

/// This represents 'num_tasks' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop num_tasks(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'num_tasks'
/// with single expression '4'.
class OMPNumTasksClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// NumTasks expression of the 'num_tasks' clause.
  Stmt *NumTasks = nullptr;

  /// Set the number-of-tasks expression.
  void setNumTasks(Expr *Size) { NumTasks = Size; }

public:
  /// Build 'num_tasks' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param HelperSize Helper num_tasks for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTasksClause(Expr *Size, Stmt *HelperSize,
                    OpenMPDirectiveKind CaptureRegion,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_num_tasks, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), NumTasks(Size) {
    setPreInitStmt(HelperSize, CaptureRegion);
  }

  /// Build an empty clause.
  explicit OMPNumTasksClause()
      : OMPClause(OMPC_num_tasks, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the number-of-tasks expression (may be null for an empty clause).
  Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); }

  child_range children() { return child_range(&NumTasks, &NumTasks + 1); }

  const_child_range children() const {
    return const_child_range(&NumTasks, &NumTasks + 1);
  }

  // Defined out of line.
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPNumTasksClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_tasks;
  }
};

/// This represents 'hint' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp critical (name) hint(6)
/// \endcode
/// In this example directive '#pragma omp critical' has name 'name' and clause
/// 'hint' with argument '6'.
class OMPHintClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Hint expression of the 'hint' clause.
  Stmt *Hint = nullptr;

  /// Set hint expression.
  void setHint(Expr *H) { Hint = H; }

public:
  /// Build 'hint' clause with expression \a Hint.
  ///
  /// \param Hint Hint expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc)
      : OMPClause(OMPC_hint, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Hint(Hint) {}

  /// Build an empty clause.
  OMPHintClause() : OMPClause(OMPC_hint, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns the hint expression (may be null for an empty clause).
  Expr *getHint() const { return cast_or_null<Expr>(Hint); }

  child_range children() { return child_range(&Hint, &Hint + 1); }

  const_child_range children() const {
    return const_child_range(&Hint, &Hint + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_hint;
  }
};

/// This represents 'dist_schedule' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp distribute dist_schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp distribute' has 'dist_schedule'
/// clause with arguments 'static' and '3'.
class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'dist_schedule' clause.
  OpenMPDistScheduleClauseKind Kind = OMPC_DIST_SCHEDULE_unknown;

  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;

  /// Location of ',' (if any).
  SourceLocation CommaLoc;

  /// Chunk size.
  Expr *ChunkSize = nullptr;

  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
  void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }

  /// Set chunk size.
  ///
  /// \param E Chunk size.
  void setChunkSize(Expr *E) { ChunkSize = E; }

public:
  /// Build 'dist_schedule' clause with schedule kind \a Kind and chunk
  /// size expression \a ChunkSize.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind DistSchedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation KLoc, SourceLocation CommaLoc,
                        SourceLocation EndLoc,
                        OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
                        Stmt *HelperChunkSize)
      : OMPClause(OMPC_dist_schedule, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
        KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
    setPreInitStmt(HelperChunkSize);
  }

  /// Build an empty clause.
  explicit OMPDistScheduleClause()
      : OMPClause(OMPC_dist_schedule, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Get kind of the clause.
  OpenMPDistScheduleClauseKind getDistScheduleKind() const { return Kind; }

  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDistScheduleKindLoc() { return KindLoc; }

  /// Get location of ','.
  SourceLocation getCommaLoc() { return CommaLoc; }

  /// Get chunk size.
  Expr *getChunkSize() { return ChunkSize; }

  /// Get chunk size.
  const Expr *getChunkSize() const { return ChunkSize; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }

  const_child_range children() const {
    auto Children = const_cast<OMPDistScheduleClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_dist_schedule;
  }
};

/// This represents 'defaultmap' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp target defaultmap(tofrom: scalar)
/// \endcode
/// In this example directive '#pragma omp target' has 'defaultmap' clause of kind
/// 'scalar' with modifier 'tofrom'.
class OMPDefaultmapClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Modifier for 'defaultmap' clause.
  OpenMPDefaultmapClauseModifier Modifier = OMPC_DEFAULTMAP_MODIFIER_unknown;

  /// Location of the modifier.
  SourceLocation ModifierLoc;

  /// A kind of the 'defaultmap' clause.
  OpenMPDefaultmapClauseKind Kind = OMPC_DEFAULTMAP_unknown;

  /// Start location of the defaultmap kind in source code.
  SourceLocation KindLoc;

  /// Set defaultmap kind.
  ///
  /// \param K Defaultmap kind.
  void setDefaultmapKind(OpenMPDefaultmapClauseKind K) { Kind = K; }

  /// Set the defaultmap modifier.
  ///
  /// \param M Defaultmap modifier.
  void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) {
    Modifier = M;
  }

  /// Set location of the defaultmap modifier.
  void setDefaultmapModifierLoc(SourceLocation Loc) {
    ModifierLoc = Loc;
  }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set defaultmap kind start location.
  ///
  /// \param KLoc Defaultmap kind location.
  void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

public:
  /// Build 'defaultmap' clause with defaultmap kind \a Kind
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Defaultmap kind.
  /// \param M The modifier applied to 'defaultmap' clause.
  /// \param MLoc Location of the modifier
  OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation MLoc, SourceLocation KLoc,
                      SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind,
                      OpenMPDefaultmapClauseModifier M)
      : OMPClause(OMPC_defaultmap, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Modifier(M), ModifierLoc(MLoc), Kind(Kind), KindLoc(KLoc) {}

  /// Build an empty clause.
  explicit OMPDefaultmapClause()
      : OMPClause(OMPC_defaultmap, SourceLocation(), SourceLocation()) {}

  /// Get kind of the clause.
  OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; }

  /// Get the modifier of the clause.
  OpenMPDefaultmapClauseModifier getDefaultmapModifier() const {
    return Modifier;
  }

  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDefaultmapKindLoc() { return KindLoc; }

  /// Get the modifier location.
  SourceLocation getDefaultmapModifierLoc() const {
    return ModifierLoc;
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_defaultmap;
  }
};

/// This represents clause 'to' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update to(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to'
/// with the variables 'a' and 'b'.
class OMPToClause final : public OMPMappableExprListClause<OMPToClause>,
                          private llvm::TrailingObjects<
                              OMPToClause, Expr *, ValueDecl *, unsigned,
                              OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPToClause(NestedNameSpecifierLoc MapperQualifierLoc,
                       DeclarationNameInfo MapperIdInfo,
                       const OMPVarListLocTy &Locs,
                       const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_to, Locs, Sizes, &MapperQualifierLoc,
                                  &MapperIdInfo) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPToClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_to, OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  static OMPToClause *Create(const ASTContext &C, const OMPVarListLocTy &Locs,
                             ArrayRef<Expr *> Vars,
                             ArrayRef<ValueDecl *> Declarations,
                             MappableExprComponentListsRef ComponentLists,
                             ArrayRef<Expr *> UDMapperRefs,
                             NestedNameSpecifierLoc UDMQualifierLoc,
                             DeclarationNameInfo MapperId);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPToClause *CreateEmpty(const ASTContext &C,
                                  const OMPMappableExprListSizeTy &Sizes);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPToClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_to;
  }
};

/// This represents clause 'from' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update from(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'from'
/// with the variables 'a' and 'b'.
class OMPFromClause final
    : public OMPMappableExprListClause<OMPFromClause>,
      private llvm::TrailingObjects<
          OMPFromClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(NestedNameSpecifierLoc MapperQualifierLoc,
                         DeclarationNameInfo MapperIdInfo,
                         const OMPVarListLocTy &Locs,
                         const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_from, Locs, Sizes, &MapperQualifierLoc,
                                  &MapperIdInfo) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_from, OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  static OMPFromClause *Create(const ASTContext &C, const OMPVarListLocTy &Locs,
                               ArrayRef<Expr *> Vars,
                               ArrayRef<ValueDecl *> Declarations,
                               MappableExprComponentListsRef ComponentLists,
                               ArrayRef<Expr *> UDMapperRefs,
                               NestedNameSpecifierLoc UDMQualifierLoc,
                               DeclarationNameInfo MapperId);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPFromClause *CreateEmpty(const ASTContext &C,
                                    const OMPMappableExprListSizeTy &Sizes);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFromClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_from;
  }
};

/// This represents clause 'use_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_ptr' with the variables 'a' and 'b'.
class OMPUseDevicePtrClause final
    : public OMPMappableExprListClause<OMPUseDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPUseDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(const OMPVarListLocTy &Locs,
                                 const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_use_device_ptr, Locs, Sizes) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_use_device_ptr, OMPVarListLocTy(),
                                  Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of original expressions, varlist_size() of
    // private copies, and varlist_size() of inits (see getPrivateCopies() and
    // getInits() below).
    return 3 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

  /// Sets the list of references to private copies with initializers for new
  /// private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for new
  /// private variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    // Private copies are stored immediately after the original variable list.
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Sets the list of references to initializer variables for new private
  /// variables.
  /// \param VL List of references.
  void setInits(ArrayRef<Expr *> VL);

  /// Gets the list of references to initializer variables for new private
  /// variables.
  MutableArrayRef<Expr *> getInits() {
    // Inits are stored immediately after the private copies.
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param PrivateVars Expressions referring to private copies.
  /// \param Inits Expressions referring to private copy initializers.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPUseDevicePtrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<Expr *> PrivateVars,
         ArrayRef<Expr *> Inits, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPUseDevicePtrClause *
  CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }

  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }

  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPUseDevicePtrClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_use_device_ptr;
  }
};

/// This represents clause 'is_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target is_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause
/// 'is_device_ptr' with the variables 'a' and 'b'.
class OMPIsDevicePtrClause final
    : public OMPMappableExprListClause<OMPIsDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPIsDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  // OMPClauseReader needs the private constructors for AST deserialization;
  // the base classes and TrailingObjects need access to the private
  // numTrailingObjects() overloads below.
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPIsDevicePtrClause(const OMPVarListLocTy &Locs,
                                const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_is_device_ptr, Locs, Sizes) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPIsDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_is_device_ptr, OMPVarListLocTy(),
                                  Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  // Note: unlike OMPUseDevicePtrClause (which also stores private copies and
  // initializers, hence 3 * varlist_size()), this clause keeps only the
  // original expression list, so the Expr * array is exactly varlist_size().
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPIsDevicePtrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPIsDevicePtrClause *
  CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);

  // Expose the original variable list as a statement range so that generic
  // AST walkers can traverse the clause's expressions.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPIsDevicePtrClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // This clause reports an empty "used children" range (default-constructed
  // begin/end iterators compare equal).
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_is_device_ptr;
  }
};

/// This class implements a simple visitor for OMPClause
/// subclasses.
template<class ImplClass, template <typename> class Ptr, typename RetTy>
class OMPClauseVisitorBase {
public:
  // Ptr<CLASS>::type is the (possibly const-qualified) pointer type used for
  // the visited node; see OMPClauseVisitor / ConstOMPClauseVisitor below for
  // the two instantiations.
#define PTR(CLASS) typename Ptr<CLASS>::type
#define DISPATCH(CLASS) \
  return static_cast<ImplClass*>(this)->Visit##CLASS(static_cast<PTR(CLASS)>(S))

  // Generate one Visit##Class overload per clause kind listed in
  // OpenMPKinds.def, each forwarding (via CRTP) to ImplClass.
#define OPENMP_CLAUSE(Name, Class) \
  RetTy Visit ## Class (PTR(Class) S) { DISPATCH(Class); }
#include "clang/Basic/OpenMPKinds.def"

  RetTy Visit(PTR(OMPClause) S) {
    // Top switch clause: visit each OMPClause.
    switch (S->getClauseKind()) {
    default: llvm_unreachable("Unknown clause kind!");
#define OPENMP_CLAUSE(Name, Class) \
    case OMPC_ ## Name : return Visit ## Class(static_cast<PTR(Class)>(S));
#include "clang/Basic/OpenMPKinds.def"
    }
  }
  // Base case, ignore it. :)
  RetTy VisitOMPClause(PTR(OMPClause) Node) { return RetTy(); }
#undef PTR
#undef DISPATCH
};

// Metafunction yielding a pointer-to-const type. The trailing ::type is
// intentionally omitted here because the PTR macro above supplies it.
template <typename T>
using const_ptr = typename std::add_pointer<typename std::add_const<T>::type>;

// Mutable-pointer and const-pointer instantiations of the visitor base.
template<class ImplClass, typename RetTy = void>
class OMPClauseVisitor :
      public OMPClauseVisitorBase <ImplClass, std::add_pointer, RetTy> {};
template<class ImplClass, typename RetTy = void>
class ConstOMPClauseVisitor :
      public OMPClauseVisitorBase <ImplClass, const_ptr, RetTy> {};

/// Visitor that prints each OMP clause to a raw_ostream according to a
/// PrintingPolicy.
class OMPClausePrinter final : public OMPClauseVisitor<OMPClausePrinter> {
  raw_ostream &OS;
  const PrintingPolicy &Policy;

  /// Process clauses with list of variables.
  template <typename T> void VisitOMPClauseList(T *Node, char StartSym);

public:
  OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy)
      : OS(OS), Policy(Policy) {}

#define OPENMP_CLAUSE(Name, Class) void Visit##Class(Class *S);
#include "clang/Basic/OpenMPKinds.def"
};

} // namespace clang

#endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
// sageInterface.h
#ifndef ROSE_SAGE_INTERFACE #define ROSE_SAGE_INTERFACE #include "sage3basic.hhh" #include <stdint.h> #include <utility> #include "rosePublicConfig.h" // for ROSE_BUILD_JAVA_LANGUAGE_SUPPORT #include "OmpAttribute.h" #if 0 // FMZ(07/07/2010): the argument "nextErrorCode" should be call-by-reference SgFile* determineFileType ( std::vector<std::string> argv, int nextErrorCode, SgProject* project ); #else SgFile* determineFileType ( std::vector<std::string> argv, int& nextErrorCode, SgProject* project ); #endif #ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT #include "rewrite.h" #endif // DQ (7/20/2008): Added support for unparsing abitrary strings in the unparser. #include "astUnparseAttribute.h" #include <set> #ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT #include "LivenessAnalysis.h" #include "abstract_handle.h" #include "ClassHierarchyGraph.h" #endif // DQ (8/19/2004): Moved from ROSE/src/midend/astRewriteMechanism/rewrite.h //! A global function for getting the string associated with an enum (which is defined in global scope) ROSE_DLL_API std::string getVariantName (VariantT v); // DQ (12/9/2004): Qing, Rich and Dan have decided to start this namespace within ROSE // This namespace is specific to interface functions that operate on the Sage III AST. // The name was chosen so as not to conflict with other classes within ROSE. // This will become the future home of many interface functions which operate on // the AST and which are generally useful to users. As a namespace multiple files can be used // to represent the compete interface and different developers may contribute interface // functions easily. // Constructor handling: (We have sageBuilder.h now for this purpose, Liao 2/1/2008) // We could add simpler layers of support for construction of IR nodes by // hiding many details in "makeSg***()" functions. 
Such functions would // return pointers to the associated Sg*** objects and would be able to hide // many IR specific details, including: // memory handling // optional parameter settings not often required // use of Sg_File_Info objects (and setting them as transformations) // // namespace AST_Interface (this name is taken already by some of Qing's work :-) //! An alias for Sg_File_Info::generateDefaultFileInfoForTransformationNode() #define TRANS_FILE Sg_File_Info::generateDefaultFileInfoForTransformationNode() /** Functions that are useful when operating on the AST. * * The Sage III IR design attempts to be minimalist. Thus additional functionality is intended to be presented using separate * higher level interfaces which work with the IR. This namespace collects functions that operate on the IR and support * numerous types of operations that are common to general analysis and transformation of the AST. */ namespace SageInterface { // Liao 6/22/2016: keep records of loop init-stmt normalization, later help undo it to support autoPar. struct Transformation_Record { // a lookup table to check if a for loop has been normalized for its c99-style init-stmt std::map <SgForStatement* , bool > forLoopInitNormalizationTable; // Detailed record about the original declaration (1st in the pair) and the normalization generated new declaration (2nd in the pair) std::map <SgForStatement* , std::pair<SgVariableDeclaration*, SgVariableDeclaration*> > forLoopInitNormalizationRecord; } ; ROSE_DLL_API extern Transformation_Record trans_records; // DQ (4/3/2014): Added general AST support separate from the AST. // Container and API for analysis information that is outside of the AST and as a result // prevents frequent modification of the IR. class DeclarationSets { // DQ (4/3/2014): This stores all associated declarations as a map of sets. 
// the key to the map is the first nondefining declaration and the elements of the set are // all of the associated declarations (including the defining declaration). private: //! Map of first-nondefining declaration to all other associated declarations. std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > declarationMap; public: void addDeclaration(SgDeclarationStatement* decl); const std::set<SgDeclarationStatement*>* getDeclarations(SgDeclarationStatement* decl); std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > & getDeclarationMap(); bool isLocatedInDefiningScope(SgDeclarationStatement* decl); }; // DQ (4/3/2014): This constucts a data structure that holds analysis information about // the AST that is seperate from the AST. This is intended to be a general mechanism // to support analysis information without constantly modifing the IR. DeclarationSets* buildDeclarationSets(SgNode*); //! An internal counter for generating unique SgName ROSE_DLL_API extern int gensym_counter; // tps : 28 Oct 2008 - support for finding the main interpretation SgAsmInterpretation* getMainInterpretation(SgAsmGenericFile* file); //! Get the unsigned value of a disassembled constant. uint64_t getAsmConstant(SgAsmValueExpression* e); //! Get the signed value of a disassembled constant. int64_t getAsmSignedConstant(SgAsmValueExpression *e); //! Function to add "C" style comment to statement. void addMessageStatement( SgStatement* stmt, std::string message ); //! 
A persistent attribute to represent a unique name for an expression.
class UniqueNameAttribute : public AstAttribute
   {
     private:
       // The unique name carried by this attribute.
          std::string name;

     public:
       // Take the argument by const reference and initialize via the member
       // initializer list instead of assigning in the body: avoids an extra
       // std::string copy. The default argument preserves the existing
       // zero-argument usage.
          UniqueNameAttribute(const std::string & n = "") : name(n) {}

       // Replace the stored name (const reference avoids a copy on input).
          void set_name (const std::string & n) { name = n; }

       // Read accessor; const-qualified so it is callable through const
       // pointers/references to the attribute.
          std::string get_name () const { return name; }
   };

// DQ (3/2/2009): Added support for collecting and merging the referenced symbols in the outlined
// function into the list used to edit the outlined code subtree to fixup references (from symbols
// in the original file to the symbols in the newer separate file).
// typedef rose_hash::unordered_map<SgNode*, SgNode*, hash_nodeptr> ReplacementMapType;
// void supplementReplacementSymbolMap ( const ReplacementMapTraversal::ReplacementMapType & inputReplacementMap );

// CH (4/9/2010): Use boost::hash instead
//#ifdef _MSC_VER
#if 0
inline size_t hash_value(SgNode* t) {return (size_t)t;}
#endif

#if 0
// DQ (8/3/2015): We expect that this is not used and is generating warnings so we
// can best fix it by removing it.
struct hash_nodeptr
   {
  // CH (4/9/2010): Use boost::hash instead
  //#ifndef _MSC_VER
#if 0
     //rose_hash::hash<char*> hasher;
#endif
     public:
          size_t operator()(SgNode* node) const
             {
            // CH (4/9/2010): Use boost::hash instead
            //#ifdef _MSC_VER
#if 0
               return (size_t) hash_value(node);
#else
               return (size_t) node;
#endif
             }
   };

#ifndef SWIG
// DQ (3/10/2013): This appears to be a problem for the SWIG interface (undefined reference at link-time).
void supplementReplacementSymbolMap ( rose_hash::unordered_map<SgNode*, SgNode*, hash_nodeptr> & inputReplacementMap );
#endif
#endif

//------------------------------------------------------------------------
//@{
/*! @name Symbol tables
  \brief utility functions for symbol tables
*/

// Liao 1/22/2008, used for get symbols for generating variable reference nodes
// !
Find a variable symbol in current and ancestor scopes for a given name ROSE_DLL_API SgVariableSymbol *lookupVariableSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL); // DQ (8/21/2013): Modified to make newest function parameters be default arguments. // DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments. //! Find a symbol in current and ancestor scopes for a given variable name, starting from top of ScopeStack if currentscope is not given or NULL. // SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL); // SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList); ROSE_DLL_API SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL); // DQ (11/24/2007): Functions moved from the Fortran support so that they could be called from within astPostProcessing. //!look up the first matched function symbol in parent scopes given only a function name, starting from top of ScopeStack if currentscope is not given or NULL ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName, SgScopeStatement *currentScope=NULL); // Liao, 1/24/2008, find exact match for a function //!look up function symbol in parent scopes given both name and function type, starting from top of ScopeStack if currentscope is not given or NULL ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName, const SgType* t, SgScopeStatement *currentScope=NULL); // DQ (8/21/2013): Modified to make newest function parameters be default arguments. 
// DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments. // DQ (5/7/2011): Added support for SgClassSymbol (used in name qualification support). // SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL); ROSE_DLL_API SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL); ROSE_DLL_API SgTypedefSymbol* lookupTypedefSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL); #if 0 // DQ (8/13/2013): This function does not make since any more, now that we have made the symbol // table handling more precise and we have to provide template parameters for any template lookup. // We also have to know if we want to lookup template classes, template functions, or template // member functions (since each have specific requirements). SgTemplateSymbol* lookupTemplateSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL); #endif #if 0 // DQ (8/13/2013): I am not sure if we want this functions in place of lookupTemplateSymbolInParentScopes. // Where these are called we might not know enough information about the template parameters or function // types, for example. 
SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL); SgTemplateFunctionSymbol* lookupTemplateFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL); SgTemplateMemberFunctionSymbol* lookupTemplateMemberFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL); #endif // DQ (8/21/2013): Modified to make some of the newest function parameters be default arguments. // DQ (8/13/2013): I am not sure if we want this functions in place of lookupTemplateSymbolInParentScopes. ROSE_DLL_API SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName & name, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList, SgScopeStatement *cscope = NULL); ROSE_DLL_API SgEnumSymbol* lookupEnumSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL); ROSE_DLL_API SgNamespaceSymbol* lookupNamespaceSymbolInParentScopes(const SgName & name, SgScopeStatement *currentScope = NULL); // DQ (7/17/2011): Added function from cxx branch that I need here for the Java support. // SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *cscope); /*! \brief set_name of symbol in symbol table. This function extracts the symbol from the relavant symbol table, changes the name (at the declaration) and reinserts it into the symbol table. \internal I think this is what this function does, I need to double check. */ // DQ (12/9/2004): Moved this function (by Alin Jula) from being a member of SgInitializedName // to this location where it can be a part of the interface for the Sage III AST. 
ROSE_DLL_API int set_name (SgInitializedName * initializedNameNode, SgName new_name); /*! \brief Output function type symbols in global function type symbol table. */ void outputGlobalFunctionTypeSymbolTable (); // DQ (6/27/2005): /*! \brief Output the local symbol tables. \implementation Each symbol table is output with the file infor where it is located in the source code. */ ROSE_DLL_API void outputLocalSymbolTables (SgNode * node); class OutputLocalSymbolTables:public AstSimpleProcessing { public: void visit (SgNode * node); }; /*! \brief Regenerate the symbol table. \implementation current symbol table must be NULL pointer before calling this function (for safety, but is this a good idea?) */ // DQ (9/28/2005): void rebuildSymbolTable (SgScopeStatement * scope); /*! \brief Clear those variable symbols with unknown type (together with initialized names) which are also not referenced by any variable references or declarations under root. If root is NULL, all symbols with unknown type will be deleted. */ void clearUnusedVariableSymbols (SgNode* root = NULL); // DQ (3/1/2009): //! All the symbol table references in the copied AST need to be reset after rebuilding the copied scope's symbol table. void fixupReferencesToSymbols( const SgScopeStatement* this_scope, SgScopeStatement* copy_scope, SgCopyHelp & help ); //@} //------------------------------------------------------------------------ //@{ /*! @name Stringify \brief Generate a useful string (name) to describe a SgNode */ /*! \brief Generate a useful name to describe the SgNode \internal default names are used for SgNode objects that can not be associated with a name. */ // DQ (9/21/2005): General function for extracting the name of declarations (when they have names) std::string get_name (const SgNode * node); /*! \brief Generate a useful name to describe the declaration \internal default names are used for declarations that can not be associated with a name. 
*/ // DQ (6/13/2005): General function for extracting the name of declarations (when they have names) std::string get_name (const SgStatement * stmt); /*! \brief Generate a useful name to describe the expression \internal default names are used for expressions that can not be associated with a name. */ std::string get_name (const SgExpression * expr); /*! \brief Generate a useful name to describe the declaration \internal default names are used for declarations that can not be associated with a name. */ // DQ (6/13/2005): General function for extracting the name of declarations (when they have names) std::string get_name (const SgDeclarationStatement * declaration); /*! \brief Generate a useful name to describe the scope \internal default names are used for scope that cannot be associated with a name. */ // DQ (6/13/2005): General function for extracting the name of declarations (when they have names) std::string get_name (const SgScopeStatement * scope); /*! \brief Generate a useful name to describe the SgSymbol \internal default names are used for SgSymbol objects that cannot be associated with a name. */ // DQ (2/11/2007): Added this function to make debugging support more complete (useful for symbol table debugging support). std::string get_name (const SgSymbol * symbol); /*! \brief Generate a useful name to describe the SgType \internal default names are used for SgType objects that cannot be associated with a name. */ std::string get_name (const SgType * type); /*! \brief Generate a useful name to describe the SgSupport IR node */ std::string get_name (const SgSupport * node); /*! \brief Generate a useful name to describe the SgLocatedNodeSupport IR node */ std::string get_name (const SgLocatedNodeSupport * node); /*! \brief Generate a useful name to describe the SgC_PreprocessorDirectiveStatement IR node */ std::string get_name ( const SgC_PreprocessorDirectiveStatement* directive ); /*! 
\brief Generate a useful name to describe the SgToken IR node */ std::string get_name ( const SgToken* token ); // DQ (3/20/2016): Added to refactor some of the DSL infrastructure support. /*! \brief Generate a useful name to support construction of identifiers from declarations. This function permits names to be generated that will be unique across translation units (a specific requirement different from the context of the get_name() functions above). \internal This supports only a restricted set of declarations presently. */ std::string generateUniqueNameForUseAsIdentifier ( SgDeclarationStatement* declaration ); std::string generateUniqueNameForUseAsIdentifier_support ( SgDeclarationStatement* declaration ); /*! \brief Global map of name collisions to support generateUniqueNameForUseAsIdentifier() function. */ extern std::map<std::string,int> local_name_collision_map; extern std::map<std::string,SgNode*> local_name_to_node_map; extern std::map<SgNode*,std::string> local_node_to_name_map; /*! \brief Traversal to set the global map of names to node and node to names.collisions to support generateUniqueNameForUseAsIdentifier() function. */ void computeUniqueNameForUseAsIdentifier( SgNode* astNode ); /*! \brief Reset map variables used to support generateUniqueNameForUseAsIdentifier() function. */ void reset_name_collision_map(); //@} //------------------------------------------------------------------------ //@{ /*! @name Class utilities \brief */ /*! \brief Get the default destructor from the class declaration */ // DQ (6/21/2005): Get the default destructor from the class declaration SgMemberFunctionDeclaration *getDefaultDestructor (SgClassDeclaration * classDeclaration); /*! \brief Get the default constructor from the class declaration */ // DQ (6/22/2005): Get the default constructor from the class declaration ROSE_DLL_API SgMemberFunctionDeclaration *getDefaultConstructor (SgClassDeclaration * classDeclaration); /*! 
\brief Return true if template definition is in the class, false if outside of class. */ // DQ (8/27/2005): bool templateDefinitionIsInClass (SgTemplateInstantiationMemberFunctionDecl * memberFunctionDeclaration); /*! \brief Generate a non-defining (forward) declaration from a defining function declaration. \internal should put into sageBuilder ? */ // DQ (9/17/2005): SgTemplateInstantiationMemberFunctionDecl* buildForwardFunctionDeclaration (SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation); //! Check if a SgNode is a declaration for a structure bool isStructDeclaration(SgNode * node); //! Check if a SgNode is a declaration for a union bool isUnionDeclaration(SgNode * node); #if 0 // DQ (8/28/2005): This is already a member function of the SgFunctionDeclaration // (so that it can handle template functions and member functions) /*! \brief Return true if member function of a template member function, of false if a non-template member function in a templated class. */ // DQ (8/27/2005): bool isTemplateMemberFunction (SgTemplateInstantiationMemberFunctionDecl * memberFunctionDeclaration); #endif //@} //------------------------------------------------------------------------ //@{ /*! @name Misc. \brief Not sure the classifications right now */ //! Save AST into a pdf file. Start from a node to find its enclosing file node. The entire file's AST will be saved into a pdf. void saveToPDF(SgNode* node, std::string filename); void saveToPDF(SgNode* node); // enable calling from gdb // DQ (2/12/2012): Added some diagnostic support. //! Diagnostic function for tracing back through the parent list to understand at runtime where in the AST a failure happened. void whereAmI(SgNode* node); //! Extract a SgPragmaDeclaration's leading keyword . For example "#pragma omp parallel" has a keyword of "omp". std::string extractPragmaKeyword(const SgPragmaDeclaration *); //! Check if a node is SgOmp*Statement ROSE_DLL_API bool isOmpStatement(SgNode* ); /*! 
\brief Return true if function is overloaded. */ // DQ (8/27/2005): bool isOverloaded (SgFunctionDeclaration * functionDeclaration); // DQ (2/14/2012): Added support function used for variable declarations in conditionals. //! Support function used for variable declarations in conditionals void initializeIfStmt(SgIfStmt *ifstmt, SgStatement* conditional, SgStatement * true_body, SgStatement * false_body); //! Support function used for variable declarations in conditionals void initializeSwitchStatement(SgSwitchStatement* switchStatement,SgStatement *item_selector,SgStatement *body); //! Support function used for variable declarations in conditionals void initializeWhileStatement(SgWhileStmt* whileStatement, SgStatement * condition, SgStatement *body, SgStatement *else_body); //! Generate unique names for expressions and attach the names as persistent attributes ("UniqueNameAttribute") void annotateExpressionsWithUniqueNames (SgProject* project); //! Check if a SgNode is a main() function declaration ROSE_DLL_API bool isMain (const SgNode* node); // DQ (6/22/2005): /*! \brief Generate unique name from C and C++ constructs. The name may contain space. This is support for the AST merge, but is generally useful as a more general mechanism than name mangling which is more closely ties to the generation of names to support link-time function name resolution. This is more general than common name mangling in that it resolves more relevant differences between C and C++ declarations. (e.g. the type within the declaration: "struct { int:8; } foo;"). \implementation current work does not support expressions. */ std::string generateUniqueName ( const SgNode * node, bool ignoreDifferenceBetweenDefiningAndNondefiningDeclarations); /** Generate a name like __temp#__ that is unique in the current scope and any parent and children scopes. # is a unique integer counter. * @param baseName the word to be included in the variable names. 
*/ std::string generateUniqueVariableName(SgScopeStatement* scope, std::string baseName = "temp"); // DQ (8/10/2010): Added const to first parameter. // DQ (3/10/2007): //! Generate a unique string from the source file position information std::string declarationPositionString (const SgDeclarationStatement * declaration); // DQ (1/20/2007): //! Added mechanism to generate project name from list of file names ROSE_DLL_API std::string generateProjectName (const SgProject * project, bool supressSuffix = false ); //! Given a SgExpression that represents a named function (or bound member //! function), return the mentioned function SgFunctionDeclaration* getDeclarationOfNamedFunction(SgExpression* func); //! Get the mask expression from the header of a SgForAllStatement SgExpression* forallMaskExpression(SgForAllStatement* stmt); //! Find all SgPntrArrRefExp under astNode, then add SgVarRefExp (if any) of SgPntrArrRefExp's dim_info into NodeList_t void addVarRefExpFromArrayDimInfo(SgNode * astNode, Rose_STL_Container<SgNode *>& NodeList_t); // DQ (10/6/2006): Added support for faster mangled name generation (caching avoids recomputation). /*! \brief Support for faster mangled name generation (caching avoids recomputation). */ #ifndef SWIG // DQ (3/10/2013): This appears to be a problem for the SWIG interface (undefined reference at link-time). void clearMangledNameCache (SgGlobal * globalScope); void resetMangledNameCache (SgGlobal * globalScope); #endif std::string getMangledNameFromCache (SgNode * astNode); std::string addMangledNameToCache (SgNode * astNode, const std::string & mangledName); SgDeclarationStatement * getNonInstantiatonDeclarationForClass (SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation); //! a better version for SgVariableDeclaration::set_baseTypeDefininingDeclaration(), handling all side effects automatically //! 
Used to have a struct declaration embedded into a variable declaration void setBaseTypeDefiningDeclaration(SgVariableDeclaration* var_decl, SgDeclarationStatement *base_decl); // DQ (10/14/2006): This function tests the AST to see if for a non-defining declaration, the // bool declarationPreceedsDefinition ( SgClassDeclaration* classNonDefiningDeclaration, SgClassDeclaration* classDefiningDeclaration ); //! Check if a defining declaration comes before of after the non-defining declaration. bool declarationPreceedsDefinition (SgDeclarationStatement *nonDefiningDeclaration, SgDeclarationStatement *definingDeclaration); // DQ (10/19/2006): Function calls have interesting context dependent rules to determine if // they are output with a global qualifier or not. Were this is true we have to avoid global // qualifiers, since the function's scope has not been defined. This is an example of where // qualification of function names in function calls are context dependent; an interesting // example of where the C++ language is not friendly to source-to-source processing :-). bool functionCallExpressionPreceedsDeclarationWhichAssociatesScope (SgFunctionCallExp * functionCall); /*! \brief Compute the intersection set for two ASTs. This is part of a test done by the copy function to compute those IR nodes in the copy that still reference the original AST. */ ROSE_DLL_API std::vector < SgNode * >astIntersection (SgNode * original, SgNode * copy, SgCopyHelp * help = NULL); //! Deep copy an arbitrary subtree ROSE_DLL_API SgNode* deepCopyNode (const SgNode* subtree); //! A template function for deep copying a subtree. It is also used to create deepcopy functions with specialized parameter and return types. e.g SgExpression* copyExpression(SgExpression* e); template <typename NodeType> NodeType* deepCopy (const NodeType* subtree) { return dynamic_cast<NodeType*>(deepCopyNode(subtree)); } //! 
Deep copy an expression
ROSE_DLL_API SgExpression* copyExpression(SgExpression* e);

//! Deep copy a statement
ROSE_DLL_API SgStatement* copyStatement(SgStatement* s);

// from VarSym.cc in src/midend/astOutlining/src/ASTtools
//! Get the variable symbol for the first initialized name of a declaration stmt.
ROSE_DLL_API SgVariableSymbol* getFirstVarSym (SgVariableDeclaration* decl);

//! Get the first initialized name of a declaration statement
ROSE_DLL_API SgInitializedName* getFirstInitializedName (SgVariableDeclaration* decl);

//! A special purpose statement removal function, originally from inlinerSupport.h.
//! Needs Jeremiah's attention to refine it. Please don't use it for now.
ROSE_DLL_API void myRemoveStatement(SgStatement* stmt);

//! Check if an expression is a constant-true value (name-implied; see implementation)
ROSE_DLL_API bool isConstantTrue(SgExpression* e);
//! Check if an expression is a constant-false value (name-implied; see implementation)
ROSE_DLL_API bool isConstantFalse(SgExpression* e);

//! Check if an expression is a call to a particular function, identified either
//! by its declaration or by qualified name and arity
ROSE_DLL_API bool isCallToParticularFunction(SgFunctionDeclaration* decl, SgExpression* e);
ROSE_DLL_API bool isCallToParticularFunction(const std::string& qualifiedName, size_t arity, SgExpression* e);

//! Check if a declaration has a "static" modifier
bool ROSE_DLL_API isStatic(SgDeclarationStatement* stmt);

//! Set a declaration as static
ROSE_DLL_API void setStatic(SgDeclarationStatement* stmt);

//! Check if a declaration has an "extern" modifier
ROSE_DLL_API bool isExtern(SgDeclarationStatement* stmt);

//! Set a declaration as extern
ROSE_DLL_API void setExtern(SgDeclarationStatement* stmt);

//! Interface for creating a statement whose computation writes its answer into
//! a given variable. Implementations override generate() to build that statement.
class StatementGenerator {
  public:
      // Virtual destructor so derived generators destroy correctly through a base pointer.
      virtual ~StatementGenerator() {};
      //! Build a statement that stores its result into where_to_write_answer.
      virtual SgStatement* generate(SgExpression* where_to_write_answer) = 0;
};

//! Check if a SgNode _s is an assignment statement (any of =,+=,-=,&=,/=, ^=, etc)
//!
//! Return the left hand, right hand expressions and if the left hand variable is also being read
bool isAssignmentStatement(SgNode* _s, SgExpression** lhs=NULL, SgExpression** rhs=NULL, bool* readlhs=NULL);

//!
Variable references can be introduced by SgVarRef, SgPntrArrRefExp, SgInitializedName, SgMemberFunctionRef etc. This function will convert them all to a top level SgInitializedName. ROSE_DLL_API SgInitializedName* convertRefToInitializedName(SgNode* current); //! Build an abstract handle from an AST node, reuse previously built handle when possible ROSE_DLL_API AbstractHandle::abstract_handle* buildAbstractHandle(SgNode*); //! Obtain a matching SgNode from an abstract handle string ROSE_DLL_API SgNode* getSgNodeFromAbstractHandleString(const std::string& input_string); //! Dump information about a SgNode for debugging ROSE_DLL_API void dumpInfo(SgNode* node, std::string desc=""); //! Reorder a list of declaration statements based on their appearance order in source files ROSE_DLL_API std::vector<SgDeclarationStatement*> sortSgNodeListBasedOnAppearanceOrderInSource(const std::vector<SgDeclarationStatement*>& nodevec); // DQ (4/13/2013): We need these to support the unparing of operators defined by operator syntax or member function names. //! Is an overloaded operator a prefix operator (e.g. address operator X * operator&(), dereference operator X & operator*(), unary plus operator X & operator+(), etc. // bool isPrefixOperator( const SgMemberFunctionRefExp* memberFunctionRefExp ); bool isPrefixOperator( SgExpression* exp ); //! Check for proper names of possible prefix operators (used in isPrefixOperator()). bool isPrefixOperatorName( const SgName & functionName ); //! Is an overloaded operator a postfix operator. (e.g. ). bool isPostfixOperator( SgExpression* exp ); //! Is an overloaded operator an index operator (also referred to as call or subscript operators). (e.g. X & operator()() or X & operator[]()). bool isIndexOperator( SgExpression* exp ); // DQ (1/10/2014): Adding more general support for token based unparsing. //! Used to support token unparsing (when the output the trailing token sequence). 
SgStatement* lastStatementOfScopeWithTokenInfo (SgScopeStatement* scope, std::map<SgNode*,TokenStreamSequenceToNodeMapping*> & tokenStreamSequenceMap); //@} //------------------------------------------------------------------------ //@{ /*! @name AST properties \brief version, language properties of current AST. */ // std::string version(); // utility_functions.h, version number /*! Brief These traverse the memory pool of SgFile IR nodes and determine what languages are in use! */ ROSE_DLL_API bool is_C_language (); ROSE_DLL_API bool is_OpenMP_language (); ROSE_DLL_API bool is_UPC_language (); //! Check if dynamic threads compilation is used for UPC programs ROSE_DLL_API bool is_UPC_dynamic_threads(); ROSE_DLL_API bool is_C99_language (); ROSE_DLL_API bool is_Cxx_language (); ROSE_DLL_API bool is_Java_language (); ROSE_DLL_API bool is_Fortran_language (); ROSE_DLL_API bool is_CAF_language (); ROSE_DLL_API bool is_PHP_language(); ROSE_DLL_API bool is_Python_language(); ROSE_DLL_API bool is_Cuda_language(); ROSE_DLL_API bool is_OpenCL_language(); ROSE_DLL_API bool is_X10_language(); ROSE_DLL_API bool is_binary_executable(); ROSE_DLL_API bool is_mixed_C_and_Cxx_language (); ROSE_DLL_API bool is_mixed_Fortran_and_C_language (); ROSE_DLL_API bool is_mixed_Fortran_and_Cxx_language (); ROSE_DLL_API bool is_mixed_Fortran_and_C_and_Cxx_language (); //@} //------------------------------------------------------------------------ //@{ /*! @name Scope \brief */ // DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique // labels for scopes in a function (as required for name mangling). /*! \brief Assigns unique numbers to each SgScopeStatement of a function. This is used to provide unique names for variables and types defined is different nested scopes of a function (used in mangled name generation). 
*/ void resetScopeNumbers (SgFunctionDefinition * functionDeclaration); // DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique // labels for scopes in a function (as required for name mangling). /*! \brief Clears the cache of scope,integer pairs for the input function. This is used to clear the cache of computed unique labels for scopes in a function. This function should be called after any transformation on a function that might effect the allocation of scopes and cause the existing unique numbers to be incorrect. This is part of support to provide unique names for variables and types defined is different nested scopes of a function (used in mangled name generation). */ void clearScopeNumbers (SgFunctionDefinition * functionDefinition); //!Find the enclosing namespace of a declaration SgNamespaceDefinitionStatement * enclosingNamespaceScope (SgDeclarationStatement * declaration); // SgNamespaceDefinitionStatement * getEnclosingNamespaceScope (SgNode * node); bool isPrototypeInScope (SgScopeStatement * scope, SgFunctionDeclaration * functionDeclaration, SgDeclarationStatement * startingAtDeclaration); //!check if node1 is a strict ancestor of node 2. (a node is not considered its own ancestor) bool ROSE_DLL_API isAncestor(SgNode* node1, SgNode* node2); //@} //------------------------------------------------------------------------ //@{ /*! @name Preprocessing Information \brief #if-#else-#end, comments, #include, etc */ //! Dumps a located node's preprocessing information. void dumpPreprocInfo (SgLocatedNode* locatedNode); //! Insert #include "filename" or #include <filename> (system header) onto the global scope of a source file, add to be the last #include .. by default among existing headers, Or as the first header. Recommended for use. PreprocessingInfo * insertHeader(SgSourceFile * source_file, const std::string & header_file_name, bool isSystemHeader, bool asLastHeader); //! 
Insert a new header right before stmt, if there are existing headers attached to stmt, insert it as the last or first header as specified by asLastHeader void insertHeader (SgStatement* stmt, PreprocessingInfo* newheader, bool asLastHeader); //! Insert #include "filename" or #include <filename> (system header) onto the global scope of a source file PreprocessingInfo * insertHeader(SgSourceFile * source_file, const std::string & header_file_name, bool isSystemHeader = false, PreprocessingInfo::RelativePositionType position = PreprocessingInfo::before); //! Insert #include "filename" or #include <filename> (system header) into the global scope containing the current scope, right after other #include XXX. ROSE_DLL_API PreprocessingInfo* insertHeader(const std::string& filename, PreprocessingInfo::RelativePositionType position=PreprocessingInfo::after, bool isSystemHeader=false, SgScopeStatement* scope=NULL); //! Identical to movePreprocessingInfo(), except for the stale name and confusing order of parameters. It will be deprecated soon. ROSE_DLL_API void moveUpPreprocessingInfo (SgStatement* stmt_dst, SgStatement* stmt_src, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef, PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false); //! Move preprocessing information of stmt_src to stmt_dst, Only move preprocessing information from the specified source-relative position to a specified target position, otherwise move all preprocessing information with position information intact. The preprocessing information is appended to the existing preprocessing information list of the target node by default. Prepending is used if usePreprend is set to true. Optionally, the relative position can be adjust after the moving using dst_position. 
ROSE_DLL_API void movePreprocessingInfo (SgStatement* stmt_src, SgStatement* stmt_dst, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef, PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false); //!Cut preprocessing information from a source node and save it into a buffer. Used in combination of pastePreprocessingInfo(). The cut-paste operation is similar to moveUpPreprocessingInfo() but it is more flexible in that the destination node can be unknown during the cut operation. ROSE_DLL_API void cutPreprocessingInfo (SgLocatedNode* src_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& save_buf); //!Paste preprocessing information from a buffer to a destination node. Used in combination of cutPreprocessingInfo() ROSE_DLL_API void pastePreprocessingInfo (SgLocatedNode* dst_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& saved_buf); //! Attach an arbitrary string to a located node. A workaround to insert irregular statements or vendor-specific attributes. ROSE_DLL_API PreprocessingInfo* attachArbitraryText(SgLocatedNode* target, const std::string & text, PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before); //!Check if a pragma declaration node has macro calls attached, if yes, replace macro calls within the pragma string with expanded strings. This only works if -rose:wave is turned on. ROSE_DLL_API void replaceMacroCallsWithExpandedStrings(SgPragmaDeclaration* target); //@} //! Build and attach comment onto the global scope of a source file PreprocessingInfo* attachComment( SgSourceFile * source_file, const std::string & content, PreprocessingInfo::DirectiveType directive_type = PreprocessingInfo::C_StyleComment, PreprocessingInfo::RelativePositionType position = PreprocessingInfo::before ); //! 
Build and attach comment, comment style is inferred from the language type of the target node if not provided ROSE_DLL_API PreprocessingInfo* attachComment(SgLocatedNode* target, const std::string & content, PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before, PreprocessingInfo::DirectiveType dtype= PreprocessingInfo::CpreprocessorUnknownDeclaration); // DQ (11/25/2009): Added matching support for adding comments to SgAsm nodes. // Build and attach comment // void attachComment(SgAsmStatement* target, const std::string & content ); // DQ (7/20/2008): I am not clear were I should put this function, candidates include: SgLocatedNode or SgInterface //! Add a string to be unparsed to support code generation for back-end specific tools or compilers. ROSE_DLL_API void addTextForUnparser ( SgNode* astNode, std::string s, AstUnparseAttribute::RelativePositionType inputlocation ); /** * Add preproccessor guard around a given node. * It surrounds the node with "#if guard" and "#endif" */ void guardNode(SgLocatedNode * target, std::string guard); //@} //------------------------------------------------------------------------ //@{ /*! @name Source File Position \brief set Sg_File_Info for a SgNode */ // ************************************************************************ // Newer versions of now depricated functions // ************************************************************************ // DQ (5/1/2012): This function queries the SageBuilder::SourcePositionClassification mode (stored in the SageBuilder // interface) and used the specified mode to initialize the source position data (Sg_File_Info objects). This // function is the only function that should be called directly (though in a namespace we can't define permissions). //! Set the source code positon for the current (input) node. ROSE_DLL_API void setSourcePosition(SgNode* node); // A better name might be "setSourcePositionForSubTree" //! 
Set the source code positon for the subtree (including the root). ROSE_DLL_API void setSourcePositionAtRootAndAllChildren(SgNode *root); //! DQ (5/1/2012): New function with improved name. void setSourcePositionAsTransformation(SgNode *node); // DQ (5/1/2012): Newly renamed function (previous name preserved for backward compatability). void setSourcePositionPointersToNull(SgNode *node); // ************************************************************************ // ************************************************************************ // Older deprecated functions // ************************************************************************ // Liao, 1/8/2007, set file info. for a whole subtree as transformation generated //! Set current node's source position as transformation generated ROSE_DLL_API void setOneSourcePositionForTransformation(SgNode *node); //! Set current node's source position as NULL ROSE_DLL_API void setOneSourcePositionNull(SgNode *node); //! Recursively set source position info(Sg_File_Info) as transformation generated ROSE_DLL_API void setSourcePositionForTransformation (SgNode * root); //! Set source position info(Sg_File_Info) as transformation generated for all SgNodes in memory pool ROSE_DLL_API void setSourcePositionForTransformation_memoryPool(); //! Check if a node is from a system header file ROSE_DLL_API bool insideSystemHeader (SgLocatedNode* node); //! Set the source position of SgLocatedNode to Sg_File_Info::generateDefaultFileInfo(). These nodes WILL be unparsed. Not for transformation usage. // ROSE_DLL_API void setSourcePosition (SgLocatedNode * locatedNode); // ************************************************************************ //@} //------------------------------------------------------------------------ //@{ /*! @name Data types \brief */ // from src/midend/astInlining/typeTraits.h // src/midend/astUtil/astInterface/AstInterface.h //! 
Get the right bool type according to C or C++ language input SgType* getBoolType(SgNode* n); //! Check if a type is an integral type, only allowing signed/unsigned short, int, long, long long. ////! ////! There is another similar function named SgType::isIntegerType(), which allows additional types char, wchar, and bool to be treated as integer types ROSE_DLL_API bool isStrictIntegerType(SgType* t); //!Get the data type of the first initialized name of a declaration statement ROSE_DLL_API SgType* getFirstVarType(SgVariableDeclaration* decl); //! Is a type default constructible? This may not quite work properly. ROSE_DLL_API bool isDefaultConstructible(SgType* type); //! Is a type copy constructible? This may not quite work properly. ROSE_DLL_API bool isCopyConstructible(SgType* type); //! Is a type assignable? This may not quite work properly. ROSE_DLL_API bool isAssignable(SgType* type); #ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT //! Check if a class type is a pure virtual class. True means that there is at least //! one pure virtual function that has not been overridden. //! In the case of an incomplete class type (forward declaration), this function returns false. ROSE_DLL_API bool isPureVirtualClass(SgType* type, const ClassHierarchyWrapper& classHierarchy); #endif //! Does a type have a trivial (built-in) destructor? ROSE_DLL_API bool hasTrivialDestructor(SgType* t); //! Is this type a non-constant reference type? (Handles typedefs correctly) ROSE_DLL_API bool isNonconstReference(SgType* t); //! Is this type a const or non-const reference type? (Handles typedefs correctly) ROSE_DLL_API bool isReferenceType(SgType* t); //! Is this type a pointer type? (Handles typedefs correctly) ROSE_DLL_API bool isPointerType(SgType* t); //! Is this a pointer to a non-const type? Note that this function will return true for const pointers pointing to //! non-const types. For example, (int* const y) points to a modifiable int, so this function returns true. Meanwhile, //! 
it returns false for (int const * x) and (int const * const x) because these types point to a const int. //! Also, only the outer layer of nested pointers is unwrapped. So the function returns true for (const int ** y), but returns //! false for const (int * const * x) ROSE_DLL_API bool isPointerToNonConstType(SgType* type); //! Is this a const type? /* const char* p = "aa"; is not treated as having a const type. It is a pointer to const char. * Similarly, neither for const int b[10]; or const int & c =10; * The standard says, "A compound type is not cv-qualified by the cv-qualifiers (if any) of the types from which it is compounded. Any cv-qualifiers applied to an array type affect the array element type, not the array type". */ ROSE_DLL_API bool isConstType(SgType* t); //! Remove const (if present) from a type. stripType() cannot do this because it removes all modifiers. SgType* removeConst(SgType* t); //! Is this a volatile type? ROSE_DLL_API bool isVolatileType(SgType* t); //! Is this a restrict type? ROSE_DLL_API bool isRestrictType(SgType* t); //! Is this a scalar type? /*! We define the following SgType as scalar types: char, short, int, long , void, Wchar, Float, double, long long, string, bool, complex, imaginary */ ROSE_DLL_API bool isScalarType(SgType* t); //! Check if a type is an integral type, only allowing signed/unsigned short, int, long, long long. //! //! There is another similar function named SgType::isIntegerType(), which allows additional types char, wchar, and bool. ROSE_DLL_API bool isStrictIntegerType(SgType* t); //! Check if a type is a struct type (a special SgClassType in ROSE) ROSE_DLL_API bool isStructType(SgType* t); //! Generate a mangled string for a given type based on Itanium C++ ABI ROSE_DLL_API std::string mangleType(SgType* type); //! Generate mangled scalar type names according to Itanium C++ ABI, the input type should pass isScalarType() in ROSE ROSE_DLL_API std::string mangleScalarType(SgType* type); //! 
Generated mangled modifier types, include const, volatile,according to Itanium C++ ABI, with extension to handle UPC shared types. ROSE_DLL_API std::string mangleModifierType(SgModifierType* type); //! Calculate the number of elements of an array type: dim1* dim2*... , assume element count is 1 for int a[]; Strip off THREADS if it is a UPC array. ROSE_DLL_API size_t getArrayElementCount(SgArrayType* t); //! Get the number of dimensions of an array type ROSE_DLL_API int getDimensionCount(SgType* t); //! Get the element type of an array. It recursively find the base type for multi-dimension array types ROSE_DLL_API SgType* getArrayElementType(SgType* t); //! Get the element type of an array, pointer or string, or NULL if not applicable. This function only check one level base type. No recursion. ROSE_DLL_API SgType* getElementType(SgType* t); /// \brief returns the array dimensions in an array as defined for arrtype /// \param arrtype the type of a C/C++ array /// \return an array that contains an expression indicating each dimension's size. /// OWNERSHIP of the expressions is TRANSFERED TO the CALLER (which /// becomes responsible for freeing the expressions). /// Note, the first entry of the array is a SgNullExpression, iff the /// first array dimension was not specified. /// \code /// int x[] = { 1, 2, 3 }; /// \endcode /// note, the expression does not have to be a constant /// \code /// int x[i*5]; /// \endcode /// \post return-value.empty() == false /// \post return-value[*] != NULL (no nullptr in the returned vector) std::vector<SgExpression*> get_C_array_dimensions(const SgArrayType& arrtype); /// \brief returns the array dimensions in an array as defined for arrtype /// \param arrtype the type of a C/C++ array /// \param varref a reference to an array variable (the variable of type arrtype) /// \return an array that contains an expression indicating each dimension's size. 
/// OWNERSHIP of the expressions is TRANSFERED TO the CALLER (which /// becomes responsible for freeing the expressions). /// If the first array dimension was not specified an expression /// that indicates that size is generated. /// \code /// int x[][3] = { 1, 2, 3, 4, 5, 6 }; /// \endcode /// the entry for the first dimension will be: /// \code /// // 3 ... size of 2nd dimension /// sizeof(x) / (sizeof(int) * 3) /// \endcode /// \pre arrtype is the array-type of varref /// \post return-value.empty() == false /// \post return-value[*] != NULL (no nullptr in the returned vector) /// \post !isSgNullExpression(return-value[*]) std::vector<SgExpression*> get_C_array_dimensions(const SgArrayType& arrtype, const SgVarRefExp& varref); /// \overload /// \note see get_C_array_dimensions for SgVarRefExp for details. /// \todo make initname const std::vector<SgExpression*> get_C_array_dimensions(const SgArrayType& arrtype, SgInitializedName& initname); //! Check if an expression is an array access (SgPntrArrRefExp). If so, return its name expression and subscripts if requested. Users can use convertRefToInitializedName() to get the possible name. It does not check if the expression is a top level SgPntrArrRefExp. ROSE_DLL_API bool isArrayReference(SgExpression* ref, SgExpression** arrayNameExp=NULL, std::vector<SgExpression*>** subscripts=NULL); //! Collect variable references in array types. The default NodeQuery::querySubTree() will miss variables referenced in array type's index list. e.g. double *buffer = new double[numItems] ; ROSE_DLL_API int collectVariableReferencesInArrayTypes (SgLocatedNode* root, Rose_STL_Container<SgNode*> & currentVarRefList); //! Has a UPC shared type of any kinds (shared-to-shared, private-to-shared, shared-to-private, shared scalar/array)? An optional parameter, mod_type_out, stores the first SgModifierType with UPC access information. /*! * Note: we classify private-to-shared as 'has shared' type for convenience here. 
It is indeed a private type in strict sense. AST graph for some examples: - shared scalar: SgModifierType -->base type - shared array: SgArrayType --> SgModiferType --> base type - shared to shared: SgModifierType --> SgPointerType --> SgModifierType ->SgTypeInt - shared to private: SgModifierType --> SgPointerType --> base type - private to shared: SgPointerType --> SgModifierType --> base type */ ROSE_DLL_API bool hasUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL ); //! Check if a type is a UPC shared type, including shared array, shared pointers etc. Exclude private pointers to shared types. Optionally return the modifier type with the UPC shared property. /*! * ROSE uses SgArrayType of SgModifierType to represent shared arrays, not SgModifierType points to SgArrayType. Also typedef may cause a chain of nodes before reach the actual SgModifierType with UPC shared property. */ ROSE_DLL_API bool isUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL); //! Check if a modifier type is a UPC shared type. ROSE_DLL_API bool isUpcSharedModifierType (SgModifierType* mod_type); //! Check if an array type is a UPC shared type. ROSE AST represents a UPC shared array as regular array of elements of UPC shared Modifier Type. Not directly a UPC shared Modifier Type of an array. ROSE_DLL_API bool isUpcSharedArrayType (SgArrayType* array_type); //! Check if a shared UPC type is strict memory consistency or not. Return false if it is relaxed. (So isUpcRelaxedSharedModifierType() is not necessary.) ROSE_DLL_API bool isUpcStrictSharedModifierType(SgModifierType* mode_type); //! Get the block size of a UPC shared modifier type ROSE_DLL_API size_t getUpcSharedBlockSize(SgModifierType* mod_type); //! Get the block size of a UPC shared type, including Modifier types and array of modifier types (shared arrays) ROSE_DLL_API size_t getUpcSharedBlockSize(SgType* t); //! Is UPC phase-less shared type? 
Phase-less means block size of the first SgModifierType with UPC information is 1 or 0/unspecified. Also return false if the type is not a UPC shared type. ROSE_DLL_API bool isUpcPhaseLessSharedType (SgType* t); //! Is a UPC private-to-shared pointer? SgPointerType comes first compared to SgModifierType with UPC information. Input type must be any of UPC shared types first. ROSE_DLL_API bool isUpcPrivateToSharedType(SgType* t); //! Is a UPC array with dimension of X*THREADS ROSE_DLL_API bool isUpcArrayWithThreads(SgArrayType* t); //! Lookup a named type based on its name, bottomup searching from a specified scope. Note name collison might be allowed for c (not C++) between typedef and enum/struct. Only the first matched named type will be returned in this case. typedef is returned as it is, not the base type it actually refers to. ROSE_DLL_API SgType* lookupNamedTypeInParentScopes(const std::string& type_name, SgScopeStatement* scope=NULL); // DQ (7/22/2014): Added support for comparing expression types in actual arguments with those expected from the formal function parameter types. //! Get the type of the associated argument expression from the function type. ROSE_DLL_API SgType* getAssociatedTypeFromFunctionTypeList(SgExpression* actual_argument_expression); //! Verify that 2 SgTemplateArgument are equivalent (same type, same expression, or same template declaration) ROSE_DLL_API bool templateArgumentEquivalence(SgTemplateArgument * arg1, SgTemplateArgument * arg2); //! Verify that 2 SgTemplateArgumentPtrList are equivalent. ROSE_DLL_API bool templateArgumentListEquivalence(const SgTemplateArgumentPtrList & list1, const SgTemplateArgumentPtrList & list2); //! Test for equivalence of types independent of access permissions (private or protected modes for members of classes). ROSE_DLL_API bool isEquivalentType (const SgType* lhs, const SgType* rhs); //! Test if two types are equivalent SgFunctionType nodes. This is necessary for template function types //! 
They may differ in one SgTemplateType pointer but identical otherwise. ROSE_DLL_API bool isEquivalentFunctionType (const SgFunctionType* lhs, const SgFunctionType* rhs); //@} //------------------------------------------------------------------------ //@{ /*! @name Loop handling \brief */ // by Jeremiah //! Add a step statement to the end of a loop body //! Add a new label to the end of the loop, with the step statement after //! it; then change all continue statements in the old loop body into //! jumps to the label //! //! For example: //! while (a < 5) {if (a < -3) continue;} (adding "a++" to end) becomes //! while (a < 5) {if (a < -3) goto label; label: a++;} ROSE_DLL_API void addStepToLoopBody(SgScopeStatement* loopStmt, SgStatement* step); ROSE_DLL_API void moveForStatementIncrementIntoBody(SgForStatement* f); ROSE_DLL_API void convertForToWhile(SgForStatement* f); ROSE_DLL_API void convertAllForsToWhiles(SgNode* top); //! Change continue statements in a given block of code to gotos to a label ROSE_DLL_API void changeContinuesToGotos(SgStatement* stmt, SgLabelStatement* label); //!Return the loop index variable for a for loop ROSE_DLL_API SgInitializedName* getLoopIndexVariable(SgNode* loop); //!Check if a SgInitializedName is used as a loop index within a AST subtree //! This function will use a bottom-up traverse starting from the subtree_root to find all enclosing loops and check if ivar is used as an index for either of them. ROSE_DLL_API bool isLoopIndexVariable(SgInitializedName* ivar, SgNode* subtree_root); //! Check if a for loop uses C99 style initialization statement with multiple expressions like for (int i=0, j=0; ..) or for (i=0,j=0;...) /*! for (int i=0, j=0; ..) is stored as two variable declarations under SgForInitStatement's init_stmt member for (i=0,j=0;...) is stored as a single expression statement, with comma expression (i=0,j=0). */ ROSE_DLL_API bool hasMultipleInitStatmentsOrExpressions (SgForStatement* for_loop); //! 
Routines to get and set the body of a loop ROSE_DLL_API SgStatement* getLoopBody(SgScopeStatement* loop); ROSE_DLL_API void setLoopBody(SgScopeStatement* loop, SgStatement* body); //! Routines to get the condition of a loop. It recognize While-loop, For-loop, and Do-While-loop ROSE_DLL_API SgStatement* getLoopCondition(SgScopeStatement* loop); //! Set the condition statement of a loop, including While-loop, For-loop, and Do-While-loop. ROSE_DLL_API void setLoopCondition(SgScopeStatement* loop, SgStatement* cond); //! Check if a for-loop has a canonical form, return loop index, bounds, step, and body if requested //! //! A canonical form is defined as : one initialization statement, a test expression, and an increment expression , loop index variable should be of an integer type. IsInclusiveUpperBound is true when <= or >= is used for loop condition ROSE_DLL_API bool isCanonicalForLoop(SgNode* loop, SgInitializedName** ivar=NULL, SgExpression** lb=NULL, SgExpression** ub=NULL, SgExpression** step=NULL, SgStatement** body=NULL, bool *hasIncrementalIterationSpace = NULL, bool* isInclusiveUpperBound = NULL); //! Check if a Fortran Do loop has a complete canonical form: Do I=1, 10, 1 ROSE_DLL_API bool isCanonicalDoLoop(SgFortranDo* loop,SgInitializedName** ivar/*=NULL*/, SgExpression** lb/*=NULL*/, SgExpression** ub/*=NULL*/, SgExpression** step/*=NULL*/, SgStatement** body/*=NULL*/, bool *hasIncrementalIterationSpace/*= NULL*/, bool* isInclusiveUpperBound/*=NULL*/); //! Set the lower bound of a loop header for (i=lb; ...) ROSE_DLL_API void setLoopLowerBound(SgNode* loop, SgExpression* lb); //! Set the upper bound of a loop header,regardless the condition expression type. for (i=lb; i op up, ...) ROSE_DLL_API void setLoopUpperBound(SgNode* loop, SgExpression* ub); //! Set the stride(step) of a loop 's incremental expression, regardless the expression types (i+=s; i= i+s, etc) ROSE_DLL_API void setLoopStride(SgNode* loop, SgExpression* stride); //! 
Normalize loop init stmt by promoting the single variable declaration statement outside of the for loop header's init statement, e.g. for (int i=0;) becomes int i_x; for (i_x=0;..) and rewrite the loop with the new index variable, if necessary ROSE_DLL_API bool normalizeForLoopInitDeclaration(SgForStatement* loop); //! Undo the normalization of for loop's C99 init declaration. Previous record of normalization is used to ease the reverse transformation. ROSE_DLL_API bool unnormalizeForLoopInitDeclaration(SgForStatement* loop); //! Normalize a for loop, return true if successful. Generated constants will be fold by default. //! //! Translations are : //! For the init statement: for (int i=0;... ) becomes int i; for (i=0;..) //! For test expression: //! i<x is normalized to i<= (x-1) and //! i>x is normalized to i>= (x+1) //! For increment expression: //! i++ is normalized to i+=1 and //! i-- is normalized to i+=-1 //! i-=s is normalized to i+= -s ROSE_DLL_API bool forLoopNormalization(SgForStatement* loop, bool foldConstant = true); //! Normalize a for loop's test expression //! i<x is normalized to i<= (x-1) and //! i>x is normalized to i>= (x+1) ROSE_DLL_API bool normalizeForLoopTest(SgForStatement* loop); ROSE_DLL_API bool normalizeForLoopIncrement(SgForStatement* loop); //!Normalize a Fortran Do loop. Make the default increment expression (1) explicit ROSE_DLL_API bool doLoopNormalization(SgFortranDo* loop); //! Unroll a target loop with a specified unrolling factor. It handles steps larger than 1 and adds a fringe loop if the iteration count is not evenly divisible by the unrolling factor. ROSE_DLL_API bool loopUnrolling(SgForStatement* loop, size_t unrolling_factor); //! Interchange/permutate a n-level perfectly-nested loop rooted at 'loop' using a lexicographical order number within (0,depth!). ROSE_DLL_API bool loopInterchange(SgForStatement* loop, size_t depth, size_t lexicoOrder); //! 
Tile the n-level (starting from 1) loop of a perfectly nested loop nest using tiling size s ROSE_DLL_API bool loopTiling(SgForStatement* loopNest, size_t targetLevel, size_t tileSize); //Winnie Loop Collapsing SgExprListExp * loopCollapsing(SgForStatement* target_loop, size_t collapsing_factor); bool getForLoopInformations( SgForStatement * for_loop, SgVariableSymbol * & iterator, SgExpression * & lower_bound, SgExpression * & upper_bound, SgExpression * & stride ); //@} //------------------------------------------------------------------------ //@{ /*! @name Topdown search \brief Top-down traversal from current node to find a node of a specified type */ //! Query a subtree to get all nodes of a given type, with an appropriate downcast. template <typename NodeType> std::vector<NodeType*> querySubTree(SgNode* top, VariantT variant = (VariantT)NodeType::static_variant) { Rose_STL_Container<SgNode*> nodes = NodeQuery::querySubTree(top,variant); std::vector<NodeType*> result(nodes.size(), NULL); int count = 0; for (Rose_STL_Container<SgNode*>::const_iterator i = nodes.begin(); i != nodes.end(); ++i, ++count) { NodeType* node = dynamic_cast<NodeType*>(*i); ROSE_ASSERT (node); result[count] = node; } return result; } /*! \brief Returns STL vector of SgFile IR node pointers. Demonstrates use of restricted traversal over just SgFile IR nodes. */ std::vector < SgFile * >generateFileList (); /** Get the current SgProject IR Node. * * The library should never have more than one project and it asserts such. If no project has been created yet then this * function returns the null pointer. */ ROSE_DLL_API SgProject * getProject(); //! \return the project associated with a node SgProject * getProject(const SgNode * node); //! 
//! Query memory pools to grab SgNode of a specified type
template <typename NodeType>
static std::vector<NodeType*> getSgNodeListFromMemoryPool()
{
  // This function uses a memory pool traversal restricted to the NodeType IR nodes
  // (the traversal is dispatched through NodeType::traverseMemoryPoolNodes below).
  class MyTraversal : public ROSE_VisitTraversal
  {
    public:
      std::vector<NodeType*> resultlist;
      // Collect every visited node; the pool traversal should only deliver
      // NodeType nodes, hence the assertion on the downcast.
      void visit ( SgNode* node)
      {
        NodeType* result = dynamic_cast<NodeType* > (node);
        ROSE_ASSERT(result!= NULL);
        if (result!= NULL)
        {
          resultlist.push_back(result);
        }
      };
      virtual ~MyTraversal() {}
  };

  MyTraversal my_traversal;
  NodeType::traverseMemoryPoolNodes(my_traversal);
  return my_traversal.resultlist;
}

/*! \brief top-down traversal from current node to find the main() function declaration */
ROSE_DLL_API SgFunctionDeclaration* findMain(SgNode* currentNode);

//! Find the last declaration statement within a scope (if any). This is often useful to decide where to insert another variable declaration statement. Pragma declarations are not treated as a declaration by default in this context.
SgStatement* findLastDeclarationStatement(SgScopeStatement * scope, bool includePragma = false);

//midend/programTransformation/partialRedundancyElimination/pre.h
//! Find referenced symbols within an expression
std::vector<SgVariableSymbol*> getSymbolsUsedInExpression(SgExpression* expr);

//! Find break statements inside a particular statement, stopping at nested loops or switches
/*! loops or switch statements defines their own contexts for break
 statements. The function will stop immediately if run on a loop or switch
 statement. If fortranLabel is non-empty, breaks (EXITs) to that label within
 nested loops are included in the returned list.
*/
std::vector<SgBreakStmt*> findBreakStmts(SgStatement* code, const std::string& fortranLabel = "");

//! Find all continue statements inside a particular statement, stopping at nested loops
/*! Nested loops define their own contexts for continue statements. The
 function will stop immediately if run on a loop statement.
 If fortranLabel is non-empty, continues (CYCLEs) to that label within nested
 loops are included in the returned list.
*/
std::vector<SgContinueStmt*> findContinueStmts(SgStatement* code, const std::string& fortranLabel = "");

std::vector<SgGotoStatement*> findGotoStmts(SgStatement* scope, SgLabelStatement* l);

std::vector<SgStatement*> getSwitchCases(SgSwitchStatement* sw);

//! Collect all variable references in a subtree
void collectVarRefs(SgLocatedNode* root, std::vector<SgVarRefExp* >& result);

//! Topdown traverse a subtree from root to find the first declaration given its name, scope (optional, can be NULL), and defining or nondefining flag.
//! Returns NULL when no matching declaration exists in the subtree.
template <typename T>
T* findDeclarationStatement(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining)
{
  bool found = false;
#if 0
  printf ("In findDeclarationStatement(): root = %p \n",root);
  printf ("In findDeclarationStatement(): name = %s \n",name.c_str());
  printf ("In findDeclarationStatement(): scope = %p \n",scope);
  printf ("In findDeclarationStatement(): isDefining = %s \n",isDefining ? "true" : "false");
#endif

  // Do we really want a NULL pointer to be acceptable input to this function?
  // Maybe we should have an assertion that it is non-null?
  if (!root) return NULL;

  // Check whether the current node itself is a declaration of the requested type.
  T* decl = dynamic_cast<T*>(root);
#if 0
  printf ("In findDeclarationStatement(): decl = %p \n",decl);
#endif

  if (decl != NULL)
  {
    if (scope)
    {
      // Scope was provided: require both a scope match and a name match.
      if ((decl->get_scope() == scope) && (decl->search_for_symbol_from_symbol_table()->get_name() == name))
      {
        found = true;
      }
    }
    else // Liao 2/9/2010. We should allow NULL scope
    {
#if 0
      // DQ (12/6/2016): Include this into the debugging code to avoid compiler warning about unused variable.
      SgSymbol* symbol = decl->search_for_symbol_from_symbol_table();
      printf ("In findDeclarationStatement(): decl->search_for_symbol_from_symbol_table() = %p \n",symbol);
      printf ("In findDeclarationStatement(): decl->search_for_symbol_from_symbol_table()->get_name() = %s \n",symbol->get_name().str());
#endif
      // No scope restriction: a name match is sufficient.
      if (decl->search_for_symbol_from_symbol_table()->get_name() == name)
      {
        found = true;
      }
    }
  }

  if (found)
  {
    if (isDefining)
    {
#if 0
      printf ("In findDeclarationStatement(): decl->get_firstNondefiningDeclaration() = %p \n",decl->get_firstNondefiningDeclaration());
      printf ("In findDeclarationStatement(): decl->get_definingDeclaration()         = %p \n",decl->get_definingDeclaration());
#endif
      // Caller asked for the defining declaration; it must exist for a match.
      ROSE_ASSERT (decl->get_definingDeclaration() != NULL);
#if 0
      printf ("In findDeclarationStatement(): returing decl->get_definingDeclaration() = %p \n",decl->get_definingDeclaration());
#endif
      return dynamic_cast<T*> (decl->get_definingDeclaration());
    }
    else
    {
#if 0
      printf ("In findDeclarationStatement(): returing decl = %p \n",decl);
#endif
      return decl;
    }
  }

  // Not found at this node: recurse into the traversal successors (depth-first, preorder).
  std::vector<SgNode*> children = root->get_traversalSuccessorContainer();
#if 0
  printf ("In findDeclarationStatement(): children.size() = %zu \n",children.size());
#endif

  // DQ (4/10/2016): Note that if we are searching for a function member that has it's defining
  // declaration defined outside of the class then it will not be found in the child list.
  for (std::vector<SgNode*>::const_iterator i = children.begin(); i != children.end(); ++i)
  {
    T* target = findDeclarationStatement<T> (*i,name,scope,isDefining);
    if (target)
    {
      return target;
    }
  }

  return NULL;
}

//! Topdown traverse a subtree from root to find the first function declaration matching the given name, scope (optional, can be NULL), and defining or nondefining flag. This is an instantiation of findDeclarationStatement<T>.
SgFunctionDeclaration* findFunctionDeclaration(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining);

#if 0 //TODO
  // 1.
  // preorder traversal from current SgNode till find next SgNode of type V_SgXXX
  // until reach the end node
  SgNode* getNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL);

  // 2. return all nodes of type VariantT following the source node
  std::vector<SgNode*> getAllNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL);
#endif

//@}

//------------------------------------------------------------------------
//@{
/*! @name Bottom up search
  \brief Backwards traverse through the AST to find a node, findEnclosingXXX()
*/
// remember to put const to all arguments.

/** Find a node by type using upward traversal.
 *
 *  Traverse backward through a specified node's ancestors, starting with the node's parent and progressing to more distant
 *  ancestors, to find the first node matching the specified or derived type. If @p includingSelf is true then the
 *  starting node, @p astNode, is returned if its type matches, otherwise the search starts at the parent of @p astNode.
 *
 *  For the purposes of this function, the parent (P) of an SgDeclarationStatement node (N) is considered to be the first
 *  non-defining declaration of N if N has both a defining declaration and a first non-defining declaration and the defining
 *  declaration is different than the first non-defining declaration.
 *
 *  If no ancestor of the requisite type of subtypes is found then this function returns a null pointer.
 *
 *  If @p astNode is the null pointer, then the return value is a null pointer. That is, if there is no node, then there cannot
 *  be an enclosing node of the specified type. */
template <typename NodeType>
NodeType* getEnclosingNode(const SgNode* astNode, const bool includingSelf = false)
{
#if 1
  // DQ (10/20/2012): This is the older version of this implementation. Until I am sure that
  // the newer version (below, in the #else branch) is what we want to use I will resolve this
  // conflict by keeping the previous version in place.
  if (NULL == astNode)
  {
    return NULL;
  }

  if ( (includingSelf ) && (dynamic_cast<const NodeType*>(astNode)) )
  {
    return const_cast<NodeType*>(dynamic_cast<const NodeType*> (astNode));
  }

  // DQ (3/5/2012): Check for reference to self...
  ROSE_ASSERT(astNode->get_parent() != astNode);

  SgNode* parent = astNode->get_parent();

  // DQ (3/5/2012): Check for loops that will cause infinite loops.
  // First pass: walk up the raw parent chain only to detect whether a cycle exists;
  // the result of this walk is discarded (parent is reset below).
  SgNode* previouslySeenParent = parent;
  bool foundCycle = false;
  while ( (foundCycle == false) && (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
  {
    ROSE_ASSERT(parent->get_parent() != parent);
#if 0
    printf ("In getEnclosingNode(): parent = %p = %s \n",parent,parent->class_name().c_str());
#endif
    parent = parent->get_parent();

    // DQ (3/5/2012): Check for loops that will cause infinite loops.
    // ROSE_ASSERT(parent != previouslySeenParent);
    if (parent == previouslySeenParent)
    {
      foundCycle = true;
    }
  }

#if 0
  printf ("previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif

  // Restart the walk from the original parent for the second pass.
  parent = previouslySeenParent;

  // If the starting parent is a declaration, redirect the walk through its
  // defining declaration (see the function-level doc comment above).
  SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
  if (declarationStatement != NULL)
  {
#if 0
    printf ("Found a SgDeclarationStatement \n");
#endif
    SgDeclarationStatement* definingDeclaration         = declarationStatement->get_definingDeclaration();
    SgDeclarationStatement* firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
#if 0
    printf (" --- declarationStatement = %p \n",declarationStatement);
    printf (" --- definingDeclaration = %p \n",definingDeclaration);
    if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
      printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
    printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
    if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
      printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
    if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
    {
#if 0
      printf ("Found a nondefining declaration so use the non-defining declaration instead \n");
#endif
      // DQ (10/19/2012): Use the defining declaration instead.
      // parent = firstNondefiningDeclaration;
      parent = definingDeclaration;
    }
  }

#if 0
  printf ("reset: previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif

  // DQ (10/19/2012): This branch is just to document the cycle that was previously detected, it is for
  // debugging only. Thus it only makes sense for it to be executed when "(foundCycle == true)". However,
  // this will have to be revisited later since it appears clear that it is a problem for the binary analysis
  // work when it is visited for this case. Since the cycle is detected, but there is no assertion on the
  // cycle, we don't exit when a cycle is identified (which is the point of the code below).
  // Note also that I have fixed the code (above and below) to only chase pointers through defining
  // declarations (where they exist), this is important since non-defining declarations can be almost
  // anywhere (and thus chasing them can make it appear that there are cycles where there are none
  // (I think); test2012_234.C demonstrates an example of this.

  // DQ (10/9/2012): Robb has suggested this change to fix the binary analysis work.
  // if (foundCycle == true)
  if (foundCycle == false)
  {
    // Second pass: the real search, this time redirecting through defining
    // declarations at every declaration statement encountered along the way.
    while ( (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
    {
      ROSE_ASSERT(parent->get_parent() != parent);
#if 0
      printf ("In getEnclosingNode() (2nd try): parent = %p = %s \n",parent,parent->class_name().c_str());
      if (parent->get_file_info() != NULL)
        parent->get_file_info()->display("In getEnclosingNode() (2nd try): debug");
#endif
      SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
      if (declarationStatement != NULL)
      {
#if 0
        printf ("Found a SgDeclarationStatement \n");
#endif
        SgDeclarationStatement* definingDeclaration         = declarationStatement->get_definingDeclaration();
        SgDeclarationStatement* firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
#if 0
        printf (" --- declarationStatement = %p = %s \n",declarationStatement,(declarationStatement != NULL) ? declarationStatement->class_name().c_str() : "null");
        printf (" --- definingDeclaration = %p \n",definingDeclaration);
        if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
          printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
        printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
        if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
          printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
        if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
        {
#if 0
          printf ("Found a nondefining declaration so use the firstNondefining declaration instead \n");
#endif
          // DQ (10/19/2012): Use the defining declaration instead.
          // parent = firstNondefiningDeclaration;
          parent = definingDeclaration;
        }
      }

      parent = parent->get_parent();

#if 1
      // DQ (3/5/2012): Check for loops that will cause infinite loops.
      ROSE_ASSERT(parent != previouslySeenParent);
#else
      printf ("WARNING::WARNING::WARNING commented out assertion for parent != previouslySeenParent \n");
      if (parent == previouslySeenParent)
        break;
#endif
    }
  }

  return const_cast<NodeType*>(dynamic_cast<const NodeType*> (parent));
#else
  // DQ (10/20/2012): Using Robb's newer version with my modification to use the definingDeclaration rather than firstNondefiningDeclaration (below).

  // Find the parent of specified type, but watch out for cycles in the ancestry (which would cause an infinite loop).
  // Cast away const because isSg* functions aren't defined for const node pointers; and our return is not const.
  SgNode *node = const_cast<SgNode*>(!astNode || includingSelf ? astNode : astNode->get_parent());
  std::set<const SgNode*> seen; // nodes we've seen, in order to detect cycles
  while (node) {
    if (NodeType *found = dynamic_cast<NodeType*>(node))
      return found;

    // FIXME: Cycle detection could be moved elsewhere so we don't need to do it on every call. [RPM 2012-10-09]
    ROSE_ASSERT(seen.insert(node).second);

    // Traverse to parent (declaration statements are a special case)
    if (SgDeclarationStatement *declarationStatement = isSgDeclarationStatement(node)) {
      SgDeclarationStatement *definingDeclaration = declarationStatement->get_definingDeclaration();
      SgDeclarationStatement *firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
      if (definingDeclaration && firstNondefiningDeclaration && declarationStatement != firstNondefiningDeclaration) {
        // DQ (10/19/2012): Use the defining declaration instead.
        // node = firstNondefiningDeclaration;
        node = definingDeclaration;
      }
    } else {
      node = node->get_parent();
    }
  }
  return NULL;
#endif
}

//!
//! Find enclosing source file node
ROSE_DLL_API SgSourceFile* getEnclosingSourceFile(SgNode* n, const bool includingSelf=false);

//! Get the closest scope from astNode. Return astNode if it is already a scope.
ROSE_DLL_API SgScopeStatement* getScope(const SgNode* astNode);

//! Get the enclosing scope from a node n
ROSE_DLL_API SgScopeStatement* getEnclosingScope(SgNode* n, const bool includingSelf=false);

//! Traverse back through a node's parents to find the enclosing global scope
ROSE_DLL_API SgGlobal* getGlobalScope( const SgNode* astNode);

//! Find the function definition
ROSE_DLL_API SgFunctionDefinition* getEnclosingProcedure(SgNode* n, const bool includingSelf=false);

ROSE_DLL_API SgFunctionDefinition* getEnclosingFunctionDefinition(SgNode* astNode, const bool includingSelf=false);

//! Find the closest enclosing statement, including the given node
ROSE_DLL_API SgStatement* getEnclosingStatement(SgNode* n);

//! Find the closest switch outside a given statement (normally used for case and default statements)
ROSE_DLL_API SgSwitchStatement* findEnclosingSwitch(SgStatement* s);

//! Find enclosing OpenMP clause body statement from s. If s is already one, return it directly.
ROSE_DLL_API SgOmpClauseBodyStatement* findEnclosingOmpClauseBodyStatement(SgStatement* s);

//! Find the closest loop outside the given statement; if fortranLabel is not empty, the Fortran label of the loop must be equal to it
ROSE_DLL_API SgScopeStatement* findEnclosingLoop(SgStatement* s, const std::string& fortranLabel = "", bool stopOnSwitches = false);

//! Find the enclosing function declaration, including its derived instances like isSgProcedureHeaderStatement, isSgProgramHeaderStatement, and isSgMemberFunctionDeclaration.
ROSE_DLL_API SgFunctionDeclaration * getEnclosingFunctionDeclaration (SgNode * astNode, const bool includingSelf=false);

//roseSupport/utility_functions.h
//! get the SgFile node from current node
ROSE_DLL_API SgFile* getEnclosingFileNode (SgNode* astNode );

//!
//! Get the initializer containing an expression if it is within an initializer.
ROSE_DLL_API SgInitializer* getInitializerOfExpression(SgExpression* n);

//! Get the closest class definition enclosing the specified AST node,
ROSE_DLL_API SgClassDefinition* getEnclosingClassDefinition(SgNode* astnode, const bool includingSelf=false);

// TODO
#if 0
SgNode * getEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL);
std::vector<SgNode *> getAllEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL);
SgVariableDeclaration* findVariableDeclaratin( const string& varname)
SgClassDeclaration* getEnclosingClassDeclaration( const SgNode* astNode);

// e.g. for some expression, find its parent statement
SgStatement* getEnclosingStatement(const SgNode* astNode);

SgSwitchStatement* getEnclosingSwitch(SgStatement* s);
SgModuleStatement* getEnclosingModuleStatement( const SgNode* astNode);

// used to build a variable reference for compiler generated code in current scope
SgSymbol * findReachingDefinition (SgScopeStatement* startScope, SgName &name);
#endif

//@}
//------------------------------------------------------------------------
//@{
/*! @name AST Walk and Traversal
  \brief
*/
// Liao, 1/9/2008
/*!
  \brief return the first global scope under current project
*/
ROSE_DLL_API SgGlobal * getFirstGlobalScope(SgProject *project);

/*!
  \brief get the last statement within a scope, return NULL if it does not exit
*/
ROSE_DLL_API SgStatement* getLastStatement(SgScopeStatement *scope);

//! Get the first statement within a scope, return NULL if it does not exist. Skip compiler-generated statement by default. Count transformation-generated ones, but excluding those which are not to be outputted in unparsers.
ROSE_DLL_API SgStatement* getFirstStatement(SgScopeStatement *scope,bool includingCompilerGenerated=false);

//! Find the first defining function declaration statement in a scope
ROSE_DLL_API SgFunctionDeclaration* findFirstDefiningFunctionDecl(SgScopeStatement* scope);

//!
//! Get next statement within the same scope of current statement
ROSE_DLL_API SgStatement* getNextStatement(SgStatement * currentStmt);

//! Get previous statement of the current statement. It may return a previous statement of a parent scope by default (climbOutScope is true), otherwise only a previous statement of the same scope is returned.
ROSE_DLL_API SgStatement* getPreviousStatement(SgStatement * currentStmt, bool climbOutScope = true);

#if 0 //TODO
  // preorder traversal from current SgNode till find next SgNode of type V_SgXXX
  SgNode* getNextSgNode( const SgNode* currentNode, VariantT=V_SgNode);
#endif

//@}

//------------------------------------------------------------------------
//@{
/*! @name AST Comparison
  \brief Compare AST nodes, subtree, etc
*/

//! Check if a SgIntVal node has a given value
ROSE_DLL_API bool isEqualToIntConst(SgExpression* e, int value);

//! Check if two function declarations refer to the same one. Two function declarations are the same when they are a) identical, b) same name in C c) same qualified named and mangled name in C++. A nondefining (prototype) declaration and a defining declaration of a same function are treated as the same.
/*!
 * There is a similar function bool compareFunctionDeclarations(SgFunctionDeclaration *f1, SgFunctionDeclaration *f2) from Classhierarchy.C
 */
ROSE_DLL_API bool isSameFunction(SgFunctionDeclaration* func1, SgFunctionDeclaration* func2);

//! Check if a statement is the last statement within its closed scope
ROSE_DLL_API bool isLastStatement(SgStatement* stmt);

//@}

//------------------------------------------------------------------------
//@{
/*! @name AST insert, removal, and replacement
  \brief Add, remove, and replace AST

  scope->append_statement(), exprListExp->append_expression() etc. are not enough to handle side effect of parent pointers, symbol tables, preprocessing info, defining/nondefining pointers etc.
*/
// DQ (2/24/2009): Simple function to delete an AST subtree (used in outlining).
//!
//! Function to delete AST subtree's nodes only, users must take care of any dangling pointers, symbols or types that result.
ROSE_DLL_API void deleteAST(SgNode* node);

//! Special purpose function for deleting AST expression tress containing valid original expression trees in constant folded expressions (for internal use only).
ROSE_DLL_API void deleteExpressionTreeWithOriginalExpressionSubtrees(SgNode* root);

// DQ (2/25/2009): Added new function to support outliner.
//! Move statements in first block to the second block (preserves order and rebuilds the symbol table).
ROSE_DLL_API void moveStatementsBetweenBlocks ( SgBasicBlock* sourceBlock, SgBasicBlock* targetBlock );

//! Move a variable declaration to a new scope, handle symbol, special scopes like For loop, etc.
ROSE_DLL_API void moveVariableDeclaration(SgVariableDeclaration* decl, SgScopeStatement* target_scope);

//! Append a statement to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc.
ROSE_DLL_API void appendStatement(SgStatement *stmt, SgScopeStatement* scope=NULL);

//! Append a statement to the end of SgForInitStatement
ROSE_DLL_API void appendStatement(SgStatement *stmt, SgForInitStatement* for_init_stmt);

//! Append a list of statements to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc.
ROSE_DLL_API void appendStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL);

// DQ (2/6/2009): Added function to support outlining into separate file.
//! Append a copy ('decl') of a function ('original_statement') into a 'scope', include any referenced declarations required if the scope is within a compiler generated file. All referenced declarations, including those from headers, are inserted if excludeHeaderFiles is set to true (the new file will not have any headers).
ROSE_DLL_API void appendStatementWithDependentDeclaration( SgDeclarationStatement* decl, SgGlobal* scope, SgStatement* original_statement, bool excludeHeaderFiles );

//! Prepend a statement to the beginning of the current scope, handling side
//! effects as appropriate
ROSE_DLL_API void prependStatement(SgStatement *stmt, SgScopeStatement* scope=NULL);

//! Prepend a statement to the beginning of SgForInitStatement
ROSE_DLL_API void prependStatement(SgStatement *stmt, SgForInitStatement* for_init_stmt);

//! prepend a list of statements to the beginning of the current scope,
//! handling side effects as appropriate
ROSE_DLL_API void prependStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL);

//! Check if a scope statement has a simple children statement list
//! so inserting additional statements under the scope is straightforward and unambiguous.
//! for example, SgBasicBlock has a simple statement list while IfStmt does not.
ROSE_DLL_API bool hasSimpleChildrenList (SgScopeStatement* scope);

//! Insert a statement before or after the target statement within the target's scope. Move around preprocessing info automatically
ROSE_DLL_API void insertStatement(SgStatement *targetStmt, SgStatement* newStmt, bool insertBefore= true, bool autoMovePreprocessingInfo = true);

//! Insert a list of statements before or after the target statement within the
//! target's scope
ROSE_DLL_API void insertStatementList(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts, bool insertBefore= true);

//! Insert a statement before a target statement
ROSE_DLL_API void insertStatementBefore(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true);

//! Insert a list of statements before a target statement
ROSE_DLL_API void insertStatementListBefore(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts);

//!
//! Insert a statement after a target statement, Move around preprocessing info automatically by default
ROSE_DLL_API void insertStatementAfter(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true);

//! Insert a list of statements after a target statement
ROSE_DLL_API void insertStatementListAfter(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmt);

//! Insert a statement after the last declaration within a scope. The statement will be prepended to the scope if there is no declaration statement found
ROSE_DLL_API void insertStatementAfterLastDeclaration(SgStatement* stmt, SgScopeStatement* scope);

//! Insert a list of statements after the last declaration within a scope. The statement will be prepended to the scope if there is no declaration statement found
ROSE_DLL_API void insertStatementAfterLastDeclaration(std::vector<SgStatement*> stmt_list, SgScopeStatement* scope);

//! Insert a statement before the first non-declaration statement in a scope. If the scope has no non-declaration statements
//! then the statement is inserted at the end of the scope.
ROSE_DLL_API void insertStatementBeforeFirstNonDeclaration(SgStatement *newStmt, SgScopeStatement *scope, bool movePreprocessingInfo=true);

//! Insert statements before the first non-declaration statement in a scope. If the scope has no non-declaration statements
//! then the new statements are inserted at the end of the scope.
ROSE_DLL_API void insertStatementListBeforeFirstNonDeclaration(const std::vector<SgStatement*> &newStmts, SgScopeStatement *scope);

//! Remove a statement from its attach point of the AST. Automatically keep its associated preprocessing information at the original place after the removal. The statement is still in memory and it is up to the users to decide if the removed one will be inserted somewhere else or released from memory (deleteAST()).
ROSE_DLL_API void removeStatement(SgStatement* stmt, bool autoRelocatePreprocessingInfo = true);

//!
//! Deep delete a sub AST tree. It uses postorder traversal to delete each child node. Users must take care of any dangling pointers, symbols or types that result. This is identical to deleteAST()
ROSE_DLL_API void deepDelete(SgNode* root);

//! Replace a statement with another. Move preprocessing information from oldStmt to newStmt if requested.
ROSE_DLL_API void replaceStatement(SgStatement* oldStmt, SgStatement* newStmt, bool movePreprocessinInfo = false);

//! Replace an anchor node with a specified pattern subtree with optional SgVariantExpression. All SgVariantExpression in the pattern will be replaced with copies of the anchor node.
ROSE_DLL_API SgNode* replaceWithPattern (SgNode * anchor, SgNode* new_pattern);

//! Replace all variable references to an old symbol in a scope to being references to a new symbol.
// Essentially replace variable a with b.
ROSE_DLL_API void replaceVariableReferences(SgVariableSymbol* old_sym, SgVariableSymbol* new_sym, SgScopeStatement * scope );

/** Given an expression, generates a temporary variable whose initializer optionally evaluates
 * that expression. Then, the var reference expression returned can be used instead of the original
 * expression. The temporary variable created can be reassigned to the expression by the returned SgAssignOp;
 * this can be used when the expression the variable represents needs to be evaluated. NOTE: This handles
 * reference types correctly by using pointer types for the temporary.
 * @param expression Expression which will be replaced by a variable
 * @param scope scope in which the temporary variable will be generated
 * @param reEvaluate an assignment op to reevaluate the expression. Leave NULL if not needed
 * @return declaration of the temporary variable, and a variable reference expression to use instead of
 * the original expression. */
std::pair<SgVariableDeclaration*, SgExpression* > createTempVariableForExpression(SgExpression* expression, SgScopeStatement* scope, bool initializeInDeclaration, SgAssignOp** reEvaluate = NULL);

/* This function creates a temporary variable for a given expression in the given scope
   This is different from SageInterface::createTempVariableForExpression in that it does not
   try to be smart to create pointers to reference types and so on. The temp is initialized to expression.
   The caller is responsible for setting the parent of SgVariableDeclaration since buildVariableDeclaration
   may not set_parent() when the scope stack is empty.
   See programTransformation/extractFunctionArgumentsNormalization/ExtractFunctionArguments.C for sample usage.
   @param expression Expression which will be replaced by a variable
   @param scope scope in which the temporary variable will be generated
*/
std::pair<SgVariableDeclaration*, SgExpression*> createTempVariableAndReferenceForExpression (SgExpression* expression, SgScopeStatement* scope);

//! Append an argument to SgFunctionParameterList, transparently set parent, scope, and symbols for arguments when possible
/*!
  We recommend to build SgFunctionParameterList before building a function declaration
  However, it is still allowed to append new arguments for existing function declarations.

  \todo function type, function symbol also need attention.
*/
ROSE_DLL_API SgVariableSymbol* appendArg(SgFunctionParameterList *, SgInitializedName*);

//! Prepend an argument to SgFunctionParameterList
ROSE_DLL_API SgVariableSymbol* prependArg(SgFunctionParameterList *, SgInitializedName*);

//! Append an expression to a SgExprListExp, set the parent pointer also
ROSE_DLL_API void appendExpression(SgExprListExp *, SgExpression*);

//! Append an expression list to a SgExprListExp, set the parent pointers also
ROSE_DLL_API void appendExpressionList(SgExprListExp *, const std::vector<SgExpression*>&);

//!
Set parameter list for a function declaration, considering existing parameter list etc. template <class actualFunction> void setParameterList(actualFunction *func,SgFunctionParameterList *paralist) { // TODO consider the difference between C++ and Fortran // fixup the scope of arguments, no symbols for nondefining function declaration's arguments // DQ (11/25/2011): templated function so that we can handle both // SgFunctionDeclaration and SgTemplateFunctionDeclaration (and their associated member // function derived classes). ROSE_ASSERT(func != NULL); ROSE_ASSERT(paralist != NULL); #if 0 // At this point we don't have cerr and endl defined, so comment this code out. // Warn to users if a paralist is being shared if (paralist->get_parent() !=NULL) { cerr << "Warning! Setting a used SgFunctionParameterList to function: " << (func->get_name()).getString()<<endl << " Sharing parameter lists can corrupt symbol tables!"<<endl << " Please use deepCopy() to get an exclusive parameter list for each function declaration!"<<endl; // ROSE_ASSERT(false); } #endif // Liao,2/5/2008 constructor of SgFunctionDeclaration will automatically generate SgFunctionParameterList, so be cautious when setting a new paralist!! if (func->get_parameterList() != NULL) { if (func->get_parameterList() != paralist) { delete func->get_parameterList(); } } func->set_parameterList(paralist); paralist->set_parent(func); // DQ (5/15/2012): Need to set the declptr in each SgInitializedName IR node. // This is needed to support the AST Copy mechanism (at least). The files: test2005_150.C, // test2012_81.C and testcode2012_82.C demonstrate this problem. SgInitializedNamePtrList & args = paralist->get_args(); for (SgInitializedNamePtrList::iterator i = args.begin(); i != args.end(); i++) { (*i)->set_declptr(func); } } //! Set a pragma of a pragma declaration. Handle memory release for preexisting pragma, and set parent pointer. ROSE_DLL_API void setPragma(SgPragmaDeclaration* decl, SgPragma *pragma); //!
Replace an expression with another, used for variable reference substitution and others. The old expression can be deleted (default case) or kept. ROSE_DLL_API void replaceExpression(SgExpression* oldExp, SgExpression* newExp, bool keepOldExp=false); //! Replace a given expression with a list of statements produced by a generator ROSE_DLL_API void replaceExpressionWithStatement(SgExpression* from, SageInterface::StatementGenerator* to); //! Similar to replaceExpressionWithStatement, but with more restrictions. //! Assumptions: from is not within the test of a loop or ifStmt, not currently traversing from or the statement it is in ROSE_DLL_API void replaceSubexpressionWithStatement(SgExpression* from, SageInterface::StatementGenerator* to); //! Set operands for expressions with single operand, such as unary expressions. Handle file info, lvalue, pointer downcasting, parent pointer etc. ROSE_DLL_API void setOperand(SgExpression* target, SgExpression* operand); //! Set left hand operand for binary expressions, transparently downcasting target expressions when necessary ROSE_DLL_API void setLhsOperand(SgExpression* target, SgExpression* lhs); //! Set right hand operand for binary expressions ROSE_DLL_API void setRhsOperand(SgExpression* target, SgExpression* rhs); //! Set original expression trees to NULL for SgValueExp or SgCastExp expressions, so you can change the value and have it unparsed correctly. ROSE_DLL_API void removeAllOriginalExpressionTrees(SgNode* top); // DQ (1/25/2010): Added support for directories //! Move file to be generated in a subdirectory (will be generated by the unparser). ROSE_DLL_API void moveToSubdirectory ( std::string directoryName, SgFile* file ); //! Supporting function to comment relocation in insertStatement() and removeStatement(). ROSE_DLL_API SgStatement* findSurroundingStatementFromSameFile(SgStatement* targetStmt, bool & surroundingStatementPreceedsTargetStatement); //!
Relocate comments and CPP directives from one statement to another. ROSE_DLL_API void moveCommentsToNewStatement(SgStatement* sourceStatement, const std::vector<int> & indexList, SgStatement* targetStatement, bool surroundingStatementPreceedsTargetStatement); // DQ (7/19/2015): This is required to support general unparsing of template instantiations for the GNU g++ // compiler which does not permit name qualification to be used to support the expression of the namespace // where a template instantiation would be placed. Such name qualification would also sometimes require // global qualification which is also not allowed by the GNU g++ compiler. These issues appear to be // specific to the GNU compiler versions, at least versions 4.4 through 4.8. //! Relocate the declaration to be explicitly represented in its associated namespace (required for some backend compilers to process template instantiations). ROSE_DLL_API void moveDeclarationToAssociatedNamespace ( SgDeclarationStatement* declarationStatement ); ROSE_DLL_API bool isTemplateInstantiationNode(SgNode* node); ROSE_DLL_API void wrapAllTemplateInstantiationsInAssociatedNamespaces(SgProject* root); // DQ (12/1/2015): Adding support for fixup of internal data structures that have references to statements (e.g. macro expansions). ROSE_DLL_API void resetInternalMapsForTargetStatement(SgStatement* sourceStatement); //@} //------------------------------------------------------------------------ //@{ /*! @name AST repair, fix, and postprocessing. \brief Mostly used internally when some AST pieces are built without knowing their target scope/parent, especially during bottom-up construction of AST. The associated symbols, parent and scope pointers cannot be set on construction then. A set of utility functions are provided to patch up scope, parent, symbol for them when the target scope/parent become known. */ //!
Connect variable reference to the right variable symbols when feasible, return the number of references being fixed. /*! In AST translation, it is possible to build a variable reference before the variable is being declared. buildVarRefExp() will use fake initialized name and symbol as placeholders to get the work done. Users should call fixVariableReference() when AST is complete and all variable declarations are in place. */ ROSE_DLL_API int fixVariableReferences(SgNode* root); //! Patch up symbol, scope, and parent information when a SgVariableDeclaration's scope is known. /*! It is possible to build a variable declaration without knowing its scope information during bottom-up construction of AST, though top-down construction is recommended in general. In this case, we have to patch up symbol table, scope and parent information when the scope is known. This function is usually used internally within appendStatement(), insertStatement(). */ ROSE_DLL_API void fixVariableDeclaration(SgVariableDeclaration* varDecl, SgScopeStatement* scope); //! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement() etc when a struct declaration was built without knowing its target scope. ROSE_DLL_API void fixStructDeclaration(SgClassDeclaration* structDecl, SgScopeStatement* scope); //! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement() etc when a class declaration was built without knowing its target scope. ROSE_DLL_API void fixClassDeclaration(SgClassDeclaration* classDecl, SgScopeStatement* scope); //! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement() etc when a namespace declaration was built without knowing its target scope. ROSE_DLL_API void fixNamespaceDeclaration(SgNamespaceDeclarationStatement* structDecl, SgScopeStatement* scope); //! Fix symbol table for SgLabelStatement. 
Used internally when the label is built without knowing its target scope. Both parameters cannot be NULL. ROSE_DLL_API void fixLabelStatement(SgLabelStatement* label_stmt, SgScopeStatement* scope); //! Set a numerical label for a Fortran statement. The statement should have an enclosing function definition already. SgLabelSymbol and SgLabelRefExp are created transparently as needed. ROSE_DLL_API void setFortranNumericLabel(SgStatement* stmt, int label_value); //! Suggest next usable (non-conflicting) numeric label value for a Fortran function definition scope ROSE_DLL_API int suggestNextNumericLabel(SgFunctionDefinition* func_def); //! Fix the symbol table and set scope (only if scope in declaration is not already set). ROSE_DLL_API void fixFunctionDeclaration(SgFunctionDeclaration* stmt, SgScopeStatement* scope); //! Fix the symbol table and set scope (only if scope in declaration is not already set). ROSE_DLL_API void fixTemplateDeclaration(SgTemplateDeclaration* stmt, SgScopeStatement* scope); //! A wrapper containing fixes (fixVariableDeclaration(), fixStructDeclaration(), fixLabelStatement(), etc) for all kinds of statements. Should be used before attaching the statement into AST. ROSE_DLL_API void fixStatement(SgStatement* stmt, SgScopeStatement* scope); // DQ (6/11/2015): This reports the statements that are marked as transformed (used to debug the token-based unparsing). //! This collects the statements that are marked as transformed (useful in debugging). ROSE_DLL_API std::set<SgStatement*> collectTransformedStatements( SgNode* node ); //! This collects the statements that are marked as modified (a flag automatically set by all set_* generated functions) (useful in debugging). ROSE_DLL_API std::set<SgStatement*> collectModifiedStatements( SgNode* node ); //! This collects the SgLocatedNodes that are marked as modified (a flag automatically set by all set_* generated functions) (useful in debugging). 
ROSE_DLL_API std::set<SgLocatedNode*> collectModifiedLocatedNodes( SgNode* node ); //@} //! Update defining and nondefining links due to a newly introduced function declaration. Should be used after inserting the function into a scope. /*! This function not only set the defining and nondefining links of the newly introduced * function declaration inside a scope, but also update other same function declarations' links * accordingly if there are any. * Assumption: The function has already inserted/appended/prepended into the scope before calling this function. */ ROSE_DLL_API void updateDefiningNondefiningLinks(SgFunctionDeclaration* func, SgScopeStatement* scope); //------------------------------------------------------------------------ //@{ /*! @name Advanced AST transformations, analyses, and optimizations \brief Some complex but commonly used AST transformations. */ //! Collect all read and write references within stmt, which can be a function, a scope statement, or a single statement. Note that a reference can be both read and written, like i++ ROSE_DLL_API bool collectReadWriteRefs(SgStatement* stmt, std::vector<SgNode*>& readRefs, std::vector<SgNode*>& writeRefs, bool useCachedDefUse=false); //!Collect unique variables which are read or written within a statement. Note that a variable can be both read and written. The statement can be either of a function, a scope, or a single line statement. ROSE_DLL_API bool collectReadWriteVariables(SgStatement* stmt, std::set<SgInitializedName*>& readVars, std::set<SgInitializedName*>& writeVars); //!Collect read only variables within a statement. The statement can be either of a function, a scope, or a single line statement. ROSE_DLL_API void collectReadOnlyVariables(SgStatement* stmt, std::set<SgInitializedName*>& readOnlyVars); //!Collect read only variable symbols within a statement. The statement can be either of a function, a scope, or a single line statement. 
ROSE_DLL_API void collectReadOnlySymbols(SgStatement* stmt, std::set<SgVariableSymbol*>& readOnlySymbols); //! Check if a variable reference is used by its address: including &a expression and foo(a) when type2 foo(Type& parameter) in C++ ROSE_DLL_API bool isUseByAddressVariableRef(SgVarRefExp* ref); //! Collect variable references involving use by address: including &a expression and foo(a) when type2 foo(Type& parameter) in C++ ROSE_DLL_API void collectUseByAddressVariableRefs (const SgStatement* s, std::set<SgVarRefExp* >& varSetB); #ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT //!Call liveness analysis on an entire project ROSE_DLL_API LivenessAnalysis * call_liveness_analysis(SgProject* project, bool debug=false); //!get liveIn and liveOut variables for a for loop from liveness analysis result liv. ROSE_DLL_API void getLiveVariables(LivenessAnalysis * liv, SgForStatement* loop, std::set<SgInitializedName*>& liveIns, std::set<SgInitializedName*> & liveOuts); #endif //!Recognize and collect reduction variables and operations within a C/C++ loop, following OpenMP 3.0 specification for allowed reduction variable types and operation types. ROSE_DLL_API void ReductionRecognition(SgForStatement* loop, std::set< std::pair <SgInitializedName*, OmpSupport::omp_construct_enum> > & results); //! Constant folding an AST subtree rooted at 'r' (replacing its children with their constant values, if applicable). Please be advised that constant folding on floating point computation may decrease the accuracy of floating point computations! /*! It is a wrapper function for ConstantFolding::constantFoldingOptimization(). Note that only r's children are replaced with their corresponding constant values, not the input SgNode r itself. You have to call this upon an expression's parent node if you want to fold the expression. 
*/ ROSE_DLL_API void constantFolding(SgNode* r); //!Instrument(Add a statement, often a function call) into a function right before the return points, handle multiple return statements (with duplicated statement s) and return expressions with side effects. Return the number of statements inserted. /*! Useful when adding a runtime library call to terminate the runtime system right before the end of a program, especially for OpenMP and UPC runtime systems. Return with complex expressions with side effects are rewritten using an additional assignment statement. */ ROSE_DLL_API int instrumentEndOfFunction(SgFunctionDeclaration * func, SgStatement* s); //! Remove jumps whose label is immediately after the jump. Used to clean up inlined code fragments. ROSE_DLL_API void removeJumpsToNextStatement(SgNode*); //! Remove labels which are not targets of any goto statements ROSE_DLL_API void removeUnusedLabels(SgNode* top); //! Remove consecutive labels ROSE_DLL_API void removeConsecutiveLabels(SgNode* top); //! Merge a variable assignment statement into a matching variable declaration statement. Callers should make sure the merge is semantically correct (by not introducing compilation errors). This function simply does the merge transformation, without eligibility check. /*! * e.g. int i; i=10; becomes int i=10; the original i=10 will be deleted after the merge * if success, return true, otherwise return false (e.g. variable declaration does not match or already has an initializer) * The original assignment stmt will be removed by default * This function is a bit ambiguous about the merge direction, to be phased out. */ ROSE_DLL_API bool mergeDeclarationAndAssignment (SgVariableDeclaration* decl, SgExprStatement* assign_stmt, bool removeAssignStmt = true); //! Merge an assignment into its upstream declaration statement. Callers should make sure the merge is semantically correct. 
ROSE_DLL_API bool mergeAssignmentWithDeclaration (SgExprStatement* assign_stmt, SgVariableDeclaration* decl, bool removeAssignStmt = true); //! Merge a declaration statement into a matching followed variable assignment. Callers should make sure the merge is semantically correct (by not introducing compilation errors). This function simply does the merge transformation, without eligibility check. /*! * e.g. int i; i=10; becomes int i=10; the original int i; will be deleted after the merge */ ROSE_DLL_API bool mergeDeclarationWithAssignment (SgVariableDeclaration* decl, SgExprStatement* assign_stmt); //! Split a variable declaration with an rhs assignment into two statements: a declaration and an assignment. /*! Return the generated assignment statement, if any * e.g. int i =10; becomes int i; i=10; * This can be seen as a normalization of declarations */ ROSE_DLL_API SgExprStatement* splitVariableDeclaration (SgVariableDeclaration* decl); //! Split declarations within a scope into declarations and assignment statements, by default only top level declarations are considered. Return the number of declarations split. ROSE_DLL_API int splitVariableDeclaration (SgScopeStatement* scope, bool topLevelOnly = true); //! Replace an expression with a temporary variable and an assignment statement /*! Add a new temporary variable to contain the value of 'from' Change reference to 'from' to use this new variable Assumptions: 'from' is not within the test of a loop or 'if' not currently traversing 'from' or the statement it is in */ ROSE_DLL_API SgAssignInitializer* splitExpression(SgExpression* from, std::string newName = ""); //! Split long expressions into blocks of statements ROSE_DLL_API void splitExpressionIntoBasicBlock(SgExpression* expr); //! Remove labeled goto statements ROSE_DLL_API void removeLabeledGotos(SgNode* top); //! 
If the given statement contains any break statements in its body, add a new label below the statement and change the breaks into gotos to that new label. ROSE_DLL_API void changeBreakStatementsToGotos(SgStatement* loopOrSwitch); //! Check if the body of a 'for' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfFor(SgForStatement* fs); //! Check if the body of a 'upc_forall' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfUpcForAll(SgUpcForAllStatement* fs); //! Check if the body of a 'while' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfWhile(SgWhileStmt* ws); //! Check if the body of a 'do .. while' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfDoWhile(SgDoWhileStmt* ws); //! Check if the body of a 'switch' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfSwitch(SgSwitchStatement* ws); //! Check if the body of a 'case option' statement is a SgBasicBlock, create one if not. SgBasicBlock* ensureBasicBlockAsBodyOfCaseOption(SgCaseOptionStmt* cs); //! Check if the body of a 'default option' statement is a SgBasicBlock, create one if not. SgBasicBlock* ensureBasicBlockAsBodyOfDefaultOption(SgDefaultOptionStmt * cs); //! Check if the true body of a 'if' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsTrueBodyOfIf(SgIfStmt* ifs); //! Check if the false body of a 'if' statement is a SgBasicBlock, create one if not when the flag is true. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsFalseBodyOfIf(SgIfStmt* ifs, bool createEmptyBody = true); //! Check if the body of a 'catch' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfCatch(SgCatchOptionStmt* cos); //! 
Check if the body of a SgOmpBodyStatement is a SgBasicBlock, create one if not ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfOmpBodyStmt(SgOmpBodyStatement* ompbodyStmt); // DQ (1/18/2015): This is added to support better quality token-based unparsing. //! Remove unused basic block IR nodes added as part of normalization. ROSE_DLL_API void cleanupNontransformedBasicBlockNode(); // DQ (1/18/2015): This is added to support better quality token-based unparsing. //! Record where normalization have been done so that we can preform denormalizations as required for the token-based unparsing to generate minimal diffs. ROSE_DLL_API void recordNormalizations(SgStatement* s); //! Check if a statement is a (true or false) body of a container-like parent, such as For, Upc_forall, Do-while, //! switch, If, Catch, OmpBodyStmt, etc bool isBodyStatement (SgStatement* s); //! Fix up ifs, loops, while, switch, Catch, OmpBodyStatement, etc. to have blocks as body components. It also adds an empty else body to if statements that don't have them. void changeAllBodiesToBlocks(SgNode* top, bool createEmptyBody = true); //! The same as changeAllBodiesToBlocks(SgNode* top). To be phased out. void changeAllLoopBodiesToBlocks(SgNode* top); //! Make a single statement body to be a basic block. Its parent is if, while, catch, or upc_forall etc. SgBasicBlock * makeSingleStatementBodyToBlock(SgStatement* singleStmt); #if 0 /** If s is the body of a loop, catch, or if statement and is already a basic block, * s is returned unmodified. Otherwise generate a SgBasicBlock between s and its parent * (a loop, catch, or if statement, etc). */ SgLocatedNode* ensureBasicBlockAsParent(SgStatement* s); #endif //! Get the constant value from a constant integer expression; abort on //! everything else. Note that signed long longs are converted to unsigned. unsigned long long getIntegerConstantValue(SgValueExp* expr); //! 
Get a statement's dependent declarations which declare the types used in the statement. The returned vector of declaration statements are sorted according to their appearance order in the original AST. Any reference to a class or template class from a namespace will be treated as a reference to the enclosing namespace. std::vector<SgDeclarationStatement*> getDependentDeclarations (SgStatement* stmt ); //! Insert an expression (new_exp) before another expression (anchor_exp) which has possible side effects, without changing the original semantics. This is achieved by using a comma operator: (new_exp, anchor_exp). The comma operator is returned. SgCommaOpExp *insertBeforeUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp); //! Insert an expression (new_exp) after another expression (anchor_exp) which has possible side effects, without changing the original semantics. This is done by using two comma operators: type T1; ... ((T1 = anchor_exp, new_exp),T1) ..., where T1 is a temp variable saving the possible side effect of anchor_exp. The top level comma op exp is returned. The reference to T1 in T1 = anchor_exp is saved in temp_ref. SgCommaOpExp *insertAfterUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp, SgStatement** temp_decl = NULL, SgVarRefExp** temp_ref = NULL); /// \brief moves the body of a function f to a new function f`; /// f's body is replaced with code that forwards the call to f`. /// \return a pair indicating the statement containing the call of f` /// and an initialized name referring to the temporary variable /// holding the result of f`. In case f returns void /// the initialized name is NULL. /// \param definingDeclaration the defining function declaration of f /// \param newName the name of function f` /// \details f's new body becomes { f`(...); } and { int res = f`(...); return res; } /// for functions returning void and a value, respectively. 
/// two function declarations are inserted in f's enclosing scope /// \code /// result_type f`(...); <--- (1) /// result_type f (...) { forward call to f` } /// result_type f`(...) { original code } <--- (2) /// \endcode /// Calls to f are not updated, thus in the transformed code all /// calls will continue calling f (this is also true for /// recursive function calls from within the body of f`). /// After the function has created the wrapper, /// definingDeclaration becomes the wrapper function /// The definition of f` is the next entry in the /// statement list; the forward declaration of f` is the previous /// entry in the statement list. /// \pre definingDeclaration must be a defining declaration of a /// free standing function. /// typeid(SgFunctionDeclaration) == typeid(definingDeclaration) /// i.e., this function is NOT implemented for class member functions, /// template functions, procedures, etc. std::pair<SgStatement*, SgInitializedName*> wrapFunction(SgFunctionDeclaration& definingDeclaration, SgName newName); /// \overload /// \tparam NameGen functor that generates a new name based on the old name. /// interface: SgName nameGen(const SgName&) /// \param nameGen name generator /// \brief see wrapFunction for details template <class NameGen> std::pair<SgStatement*, SgInitializedName*> wrapFunction(SgFunctionDeclaration& definingDeclaration, NameGen nameGen) { // Derive the wrapper's name from the wrapped function's current name, then delegate to the SgName overload. SgName wrapperName = nameGen(definingDeclaration.get_name()); return wrapFunction(definingDeclaration, wrapperName); } /// \brief convenience function that returns the first initialized name in a /// list of variable declarations. SgInitializedName& getFirstVariable(SgVariableDeclaration& vardecl); //@} // DQ (6/7/2012): Unclear where this function should go... 
bool hasTemplateSyntax( const SgName & name ); #if 0 //------------------------AST dump, stringify----------------------------- //------------------------------------------------------------------------ std::string buildOperatorString ( SgNode* astNode ); //transformationSupport.h // do we need these? std::string dump_node(const SgNode* astNode); std::string dump_tree(const SgNode* astNode); // or a friendly version of unparseToString(), as a memeber function std::string SgNode::toString(bool asSubTree=true); // dump node or subtree //----------------------------AST comparison------------------------------ //------------------------------------------------------------------------ // How to get generic functions for comparison? bool isNodeEqual(SgNode* node1, SgNode* node2); //? bool isTreeEqual(SgNode* tree1, SgNode* tree2); //! Are two expressions equal (using a deep comparison)? bool expressionTreeEqual(SgExpression*, SgExpression*); //! Are corresponding expressions in two lists equal (using a deep comparison)? bool expressionTreeEqualStar(const SgExpressionPtrList&, const SgExpressionPtrList&); //----------------------AST verfication/repair---------------------------- //------------------------------------------------------------------------ // sanity check of AST subtree, any suggestions? 
// TODO verifySgNode(SgNode* node, bool subTree=true); //src/midend/astDiagnostics/AstConsistencyTests.h // AstTests::runAllTests(SgProject * ) //src/midend/astUtil/astInterface/AstInterface.h.C //FixSgProject(SgProject &project) //FixSgTree(SgNode* r) //src/frontend/SageIII/astPostProcessing //AstPostProcessing(SgNode * node) //--------------------------AST modification------------------------------ //------------------------------------------------------------------------ // any operations changing AST tree, including // insert, copy, delete(remove), replace // insert before or after some point, argument list is consistent with LowLevelRewrite void insertAst(SgNode* targetPosition, SgNode* newNode, bool insertBefore=true); // previous examples //void myStatementInsert(SgStatement* target,...) // void AstInterfaceBase::InsertStmt(AstNodePtr const & orig, AstNodePtr const &n, bool insertbefore, bool extractfromBasicBlock) // copy // copy children of one basic block to another basic block //void appendStatementCopy (const SgBasicBlock* a, SgBasicBlock* b); void copyStatements (const SgBasicBlock* src, SgBasicBlock* dst); // delete (remove) a node or a whole subtree void removeSgNode(SgNode* targetNode); // need this? void removeSgNodeTree(SgNode* subtree); // need this? void removeStatement( SgStatement* targetStmt); //Move = delete + insert void moveAst (SgNode* src, SgNode* target); // need this? // similar to void moveStatements (SgBasicBlock* src, SgBasicBlock* target); // replace= delete old + insert new (via building or copying) // DQ (1/25/2010): This does not appear to exist as a definition anywhere in ROSE. 
// void replaceAst(SgNode* oldNode, SgNode* newNode); //void replaceChild(SgNode* parent, SgNode* from, SgNode* to); //bool AstInterface::ReplaceAst( const AstNodePtr& orig, const AstNodePtr& n) //--------------------------AST transformations--------------------------- //------------------------------------------------------------------------ // Advanced AST modifications through basic AST modifications // Might not be included in AST utitlity list, but listed here for the record. // extract statements/content from a scope void flattenBlocks(SgNode* n); //src/midend/astInlining/inlinerSupport.h void renameVariables(SgNode* n); void renameLabels(SgNode* n, SgFunctionDefinition* enclosingFunctionDefinition); void simpleCopyAndConstantPropagation(SgNode* top); void changeAllMembersToPublic(SgNode* n); void removeVariableDeclaration(SgInitializedName* initname); //! Convert something like "int a = foo();" into "int a; a = foo();" SgAssignOp* convertInitializerIntoAssignment(SgAssignInitializer* init); //! Rewrites a while or for loop so that the official test is changed to //! "true" and what had previously been the test is now an if-break //! combination (with an inverted condition) at the beginning of the loop //! body void pushTestIntoBody(LoopStatement* loopStmt); //programTransformation/finiteDifferencing/finiteDifferencing.h //! Move variables declared in a for statement to just outside that statement. void moveForDeclaredVariables(SgNode* root); //------------------------ Is/Has functions ------------------------------ //------------------------------------------------------------------------ // misc. 
boolean functions // some of them could moved to SgXXX class as a member function bool isOverloaded (SgFunctionDeclaration * functionDeclaration); bool isSwitchCond (const SgStatement* s); bool isIfCond (const SgStatement* s); bool isWhileCond (const SgStatement* s); bool isStdNamespace (const SgScopeStatement* scope); bool isTemplateInst (const SgDeclarationStatement* decl); bool isCtor (const SgFunctionDeclaration* func); bool isDtor (const SgFunctionDeclaration* func); // src/midend/astInlining/typeTraits.h bool hasTrivialDestructor(SgType* t); ROSE_DLL_API bool isNonconstReference(SgType* t); ROSE_DLL_API bool isReferenceType(SgType* t); // generic ones, or move to the SgXXX class as a member function bool isConst(SgNode* node); // const type, variable, function, etc. // .... and more bool isConstType (const SgType* type); bool isConstFunction (const SgFunctionDeclaration* decl); bool isMemberVariable(const SgInitializedName & var); //bool isMemberVariable(const SgNode& in); bool isPrototypeInScope (SgScopeStatement * scope, SgFunctionDeclaration * functionDeclaration, SgDeclarationStatement * startingAtDeclaration); bool MayRedefined(SgExpression* expr, SgNode* root); // bool isPotentiallyModified(SgExpression* expr, SgNode* root); // inlinderSupport.h bool hasAddressTaken(SgExpression* expr, SgNode* root); //src/midend/astInlining/inlinerSupport.C // can also classified as topdown search bool containsVariableReference(SgNode* root, SgInitializedName* var); bool isDeclarationOf(SgVariableDeclaration* decl, SgInitializedName* var); bool isPotentiallyModifiedDuringLifeOf(SgBasicBlock* sc, SgInitializedName* toCheck, SgInitializedName* lifetime) //src/midend/programTransformation/partialRedundancyElimination/pre.h bool anyOfListPotentiallyModifiedIn(const std::vector<SgVariableSymbol*>& syms, SgNode* n); //------------------------ loop handling --------------------------------- //------------------------------------------------------------------------ //get and 
set loop control expressions // 0: init expr, 1: condition expr, 2: stride expr SgExpression* getForLoopTripleValues(int valuetype,SgForStatement* forstmt ); int setForLoopTripleValues(int valuetype,SgForStatement* forstmt, SgExpression* exp); bool isLoopIndexVarRef(SgForStatement* forstmt, SgVarRefExp *varref); SgInitializedName * getLoopIndexVar(SgForStatement* forstmt); //------------------------expressions------------------------------------- //------------------------------------------------------------------------ //src/midend/programTransformation/partialRedundancyElimination/pre.h int countComputationsOfExpressionIn(SgExpression* expr, SgNode* root); //src/midend/astInlining/replaceExpressionWithStatement.h void replaceAssignmentStmtWithStatement(SgExprStatement* from, StatementGenerator* to); void replaceSubexpressionWithStatement(SgExpression* from, StatementGenerator* to); SgExpression* getRootOfExpression(SgExpression* n); //--------------------------preprocessing info. ------------------------- //------------------------------------------------------------------------ //! Removes all preprocessing information at a given position. void cutPreprocInfo (SgBasicBlock* b, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& save_buf); //! Pastes preprocessing information at the front of a statement. void pastePreprocInfoFront (AttachedPreprocessingInfoType& save_buf, SgStatement* s); //! Pastes preprocessing information at the back of a statement. void pastePreprocInfoBack (AttachedPreprocessingInfoType& save_buf, SgStatement* s); /*! * \brief Moves 'before' preprocessing information. * Moves all preprocessing information attached 'before' the source * statement to the front of the destination statement. 
*/ // a generic one for all /// void movePreprocessingInfo(src, dest, RelativePositionType); void moveBeforePreprocInfo (SgStatement* src, SgStatement* dest); void moveInsidePreprocInfo (SgBasicBlock* src, SgBasicBlock* dest); void moveAfterPreprocInfo (SgStatement* src, SgStatement* dest); //--------------------------------operator-------------------------------- //------------------------------------------------------------------------ from transformationSupport.h, not sure if they should be included here /* return enum code for SAGE operators */ operatorCodeType classifyOverloadedOperator(); // transformationSupport.h /*! \brief generates a source code string from operator name. This function returns a string representing the elementwise operator (for primative types) that would be match that associated with the overloaded operator for a user-defined abstractions (e.g. identifyOperator("operator+()") returns "+"). */ std::string stringifyOperator (std::string name); //--------------------------------macro ---------------------------------- //------------------------------------------------------------------------ std::string buildMacro ( std::string s ); //transformationSupport.h //--------------------------------access functions--------------------------- //----------------------------------get/set sth.----------------------------- // several categories: * get/set a direct child/grandchild node or fields * get/set a property flag value * get a descendent child node using preorder searching * get an ancestor node using bottomup/reverse searching // SgName or string? std::string getFunctionName (SgFunctionCallExp* functionCallExp); std::string getFunctionTypeName ( SgFunctionCallExp* functionCallExpression ); // do we need them anymore? or existing member functions are enought? 
// a generic one: std::string get_name (const SgNode* node); std::string get_name (const SgDeclarationStatement * declaration); // get/set some property: should moved to SgXXX as an inherent memeber function? // access modifier void setExtern (SgFunctionDeclartion*) void clearExtern() // similarly for other declarations and other properties void setExtern (SgVariableDeclaration*) void setPublic() void setPrivate() #endif // DQ (1/23/2013): Added support for generated a set of source sequence entries. std::set<unsigned int> collectSourceSequenceNumbers( SgNode* astNode ); //--------------------------------Type Traits (C++)--------------------------- bool HasNoThrowAssign(const SgType * const inputType); bool HasNoThrowCopy(const SgType * const inputType); bool HasNoThrowConstructor(const SgType * const inputType); bool HasTrivialAssign(const SgType * const inputType); bool HasTrivialCopy(const SgType * const inputType); bool HasTrivialConstructor(const SgType * const inputType); bool HasTrivialDestructor(const SgType * const inputType); bool HasVirtualDestructor(const SgType * const inputType); bool IsBaseOf(const SgType * const inputBaseType, const SgType * const inputDerivedType); bool IsAbstract(const SgType * const inputType); bool IsClass(const SgType * const inputType); bool IsEmpty(const SgType * const inputType); bool IsEnum(const SgType * const inputType); bool IsPod(const SgType * const inputType); bool IsPolymorphic(const SgType * const inputType); bool IsStandardLayout(const SgType * const inputType); bool IsLiteralType(const SgType * const inputType); bool IsTrivial(const SgType * const inputType); bool IsUnion(const SgType * const inputType); SgType * UnderlyingType(SgType *type); // DQ (3/2/2014): Added a new interface function (used in the snippet insertion support). 
void supportForInitializedNameLists ( SgScopeStatement* scope, SgInitializedNamePtrList & variableList ); // DQ (3/4/2014): Added support for testing two trees for equivalents using the AST iterators. bool isStructurallyEquivalentAST( SgNode* tree1, SgNode* tree2 ); // JP (10/14/24): Moved code to evaluate a const integer expression (like in array size definitions) to SageInterface /*! The datastructure is used as the return type for SageInterface::evaluateConstIntegerExpression(). One needs to always check whether hasValue_ is true before accessing value_ */ struct const_int_expr_t { size_t value_; bool hasValue_; }; /*! \brief The function tries to evaluate const integer expressions (such as are used in array dimension sizes). It follows variable symbols, and requires constness. */ struct const_int_expr_t evaluateConstIntegerExpression(SgExpression *expr); // JP (9/17/14): Added function to test whether two SgType* are equivalent or not bool checkTypesAreEqual(SgType *typeA, SgType *typeB); //--------------------------------Java interface functions --------------------- #ifdef ROSE_BUILD_JAVA_LANGUAGE_SUPPORT ROSE_DLL_API std::string getTempDirectory(SgProject *project); ROSE_DLL_API void destroyTempDirectory(std::string); ROSE_DLL_API SgFile *processFile(SgProject *, std::string, bool unparse = false); ROSE_DLL_API std::string preprocessPackage(SgProject *, std::string); ROSE_DLL_API std::string preprocessImport(SgProject *, std::string); ROSE_DLL_API SgFile* preprocessCompilationUnit(SgProject *, std::string, std::string, bool unparse = true); ROSE_DLL_API SgClassDefinition *findJavaPackage(SgScopeStatement *, std::string); ROSE_DLL_API SgClassDefinition *findOrInsertJavaPackage(SgProject *, std::string, bool create_directory = false); ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, SgClassDefinition *package_definition, std::string); ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, std::string, std::string); ROSE_DLL_API 
SgClassDeclaration *findOrImportJavaClass(SgProject *, SgClassType *);
ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassDefinition *);
ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassType *);

#endif // ROSE_BUILD_JAVA_LANGUAGE_SUPPORT

// DQ (8/31/2016): Making this a template function so that we can have it work with user defined filters.
//! This function detects template instantiations that are relevant when filters are used.
/*!
  EDG normalizes some in-class template functions and member functions to be redefined outside of a class.
  this causes the associated template instantiations to be declared outside of the class, and to be marked
  as compiler generated (since the compiler generated form outside of the class declaration).  ROSE captures
  the function definitions, but in the new location (defined outside of the class declaration).  This can
  confuse some simple tests for template instantiations that are a part of definitions in a file, thus we
  have this function to detect this specific normalization.

  \param function  candidate declaration; only template instantiation (member) function declarations
                   are inspected, anything else yields false.
  \param filter    user-defined predicate; its operator() is applied to the instantiation's originating
                   template declaration.
  \return true only when the originating template declaration satisfies *filter*.
 */
template < class T >
bool isTemplateInstantiationFromTemplateDeclarationSatisfyingFilter (SgFunctionDeclaration* function, T* filter )
   {
  // DQ (9/1/2016): This function is called in the Call graph generation to avoid filtering out EDG normalized
  // function template instantiations (which come from normalized template functions and member functions).
  // Note that because of the EDG normalization the member function is moved outside of the class, and
  // thus marked as compiler generated.  However the template instantiations are always marked as compiler
  // generated (if not specializations) and so we want to include a template instantiation that is marked
  // as compiler generated, but is from a template declaration that satisfied a specific user defined filter.
  // The complexity of this detection is isolated here, but knowing that it must be called is more complex.
  // This function is call in the CG.C file of tests/nonsmoke/functional/roseTests/programAnalysisTests/testCallGraphAnalysis.

     bool retval = false;

#define DEBUG_TEMPLATE_NORMALIZATION_DETECTION 0

#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
     printf ("In isNormalizedTemplateInstantiation(): function = %p = %s = %s \n",function,function->class_name().c_str(),function->get_name().str());
#endif

  // Test for this to be a template instantation (in which case it was marked as
  // compiler generated but we may want to allow it to be used in the call graph,
  // if it's template was a part was defined in the current directory).
     SgTemplateInstantiationFunctionDecl* templateInstantiationFunction = isSgTemplateInstantiationFunctionDecl(function);
     SgTemplateInstantiationMemberFunctionDecl* templateInstantiationMemberFunction = isSgTemplateInstantiationMemberFunctionDecl(function);

     if (templateInstantiationFunction != NULL)
        {
       // When the defining function has been normalized by EDG, only the non-defining declaration will have a source position.
       // NOTE(review): the result of get_firstNondefiningDeclaration() is dereferenced immediately below
       // without a NULL check -- confirm the invariant that instantiations always have one.
          templateInstantiationFunction = isSgTemplateInstantiationFunctionDecl(templateInstantiationFunction->get_firstNondefiningDeclaration());
          SgTemplateFunctionDeclaration* templateFunctionDeclaration = templateInstantiationFunction->get_templateDeclaration();
          if (templateFunctionDeclaration != NULL)
             {
               retval = filter->operator()(templateFunctionDeclaration);
             }
            else
             {
            // Assume false.
             }
#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
          printf ("  --- case of templateInstantiationFunction: retval = %s \n",retval ? "true" : "false");
#endif
        }
       else
        {
          if (templateInstantiationMemberFunction != NULL)
             {
            // When the defining function has been normalized by EDG, only the non-defining declaration will have a source position.
            // NOTE(review): same unchecked dereference pattern as the non-member case above.
               templateInstantiationMemberFunction = isSgTemplateInstantiationMemberFunctionDecl(templateInstantiationMemberFunction->get_firstNondefiningDeclaration());
               SgTemplateMemberFunctionDeclaration* templateMemberFunctionDeclaration = templateInstantiationMemberFunction->get_templateDeclaration();
               if (templateMemberFunctionDeclaration != NULL)
                  {
                    retval = filter->operator()(templateMemberFunctionDeclaration);
                  }
                 else
                  {
                 // Assume false.
                  }
#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
               printf ("  --- case of templateInstantiationMemberFunction: retval = %s \n",retval ? "true" : "false");
#endif
             }
        }

     return retval;
   }

}// end of namespace

#endif
bitshuffle.c
/* * Bitshuffle - Filter for improving compression of typed binary data. * * Author: Kiyoshi Masui <kiyo@physics.ubc.ca> * Website: http://www.github.com/kiyo-masui/bitshuffle * Created: 2014 * * See LICENSE file for details about copyright and rights to use. * */ #include "bitshuffle.h" #include "iochain.h" #include "lz4.h" #include <stdio.h> #include <string.h> #if defined(__AVX2__) && defined (__SSE2__) #define USEAVX2 #endif #if defined(__SSE2__) #define USESSE2 #endif // Conditional includes for SSE2 and AVX2. #ifdef USEAVX2 #include <immintrin.h> #elif defined USESSE2 #include <emmintrin.h> #endif // Constants. #define BSHUF_MIN_RECOMMEND_BLOCK 128 #define BSHUF_BLOCKED_MULT 8 // Block sizes must be multiple of this. #define BSHUF_TARGET_BLOCK_SIZE_B 8192 // Use fast decompression instead of safe decompression for LZ4. #define BSHUF_LZ4_DECOMPRESS_FAST // Macros. #define CHECK_MULT_EIGHT(n) if (n % 8) return -80; #define MIN(X,Y) ((X) < (Y) ? (X) : (Y)) #define MAX(X,Y) ((X) > (Y) ? (X) : (Y)) #define CHECK_ERR(count) if (count < 0) { return count; } #define CHECK_ERR_FREE(count, buf) if (count < 0) { free(buf); return count; } #define CHECK_ERR_FREE_LZ(count, buf) if (count < 0) { \ free(buf); return count - 1000; } /* ---- Functions indicating compile time instruction set. ---- */ int bshuf_using_SSE2(void) { #ifdef USESSE2 return 1; #else return 0; #endif } int bshuf_using_AVX2(void) { #ifdef USEAVX2 return 1; #else return 0; #endif } /* ---- Worker code not requiring special instruction sets. ---- * * The following code does not use any x86 specific vectorized instructions * and should compile on any machine * */ /* Transpose 8x8 bit array packed into a single quadword *x*. * *t* is workspace. 
*/ #define TRANS_BIT_8X8(x, t) { \ t = (x ^ (x >> 7)) & 0x00AA00AA00AA00AALL; \ x = x ^ t ^ (t << 7); \ t = (x ^ (x >> 14)) & 0x0000CCCC0000CCCCLL; \ x = x ^ t ^ (t << 14); \ t = (x ^ (x >> 28)) & 0x00000000F0F0F0F0LL; \ x = x ^ t ^ (t << 28); \ } /* Transpose of an array of arbitrarily typed elements. */ #define TRANS_ELEM_TYPE(in, out, lda, ldb, type_t) { \ size_t ii, jj, kk; \ type_t* in_type = (type_t*) in; \ type_t* out_type = (type_t*) out; \ for(ii = 0; ii + 7 < lda; ii += 8) { \ for(jj = 0; jj < ldb; jj++) { \ for(kk = 0; kk < 8; kk++) { \ out_type[jj*lda + ii + kk] = \ in_type[ii*ldb + kk * ldb + jj]; \ } \ } \ } \ for(ii = lda - lda % 8; ii < lda; ii ++) { \ for(jj = 0; jj < ldb; jj++) { \ out_type[jj*lda + ii] = in_type[ii*ldb + jj]; \ } \ } \ } /* Memory copy with bshuf call signature. For testing and profiling. */ int64_t bshuf_copy(void* in, void* out, const size_t size, const size_t elem_size) { char* in_b = (char*) in; char* out_b = (char*) out; memcpy(out_b, in_b, size * elem_size); return size * elem_size; } /* Transpose bytes within elements, starting partway through input. */ int64_t bshuf_trans_byte_elem_remainder(void* in, void* out, const size_t size, const size_t elem_size, const size_t start) { size_t ii, jj, kk; char* in_b = (char*) in; char* out_b = (char*) out; CHECK_MULT_EIGHT(start); if (size > start) { // ii loop separated into 2 loops so the compiler can unroll // the inner one. for (ii = start; ii + 7 < size; ii += 8) { for (jj = 0; jj < elem_size; jj++) { for (kk = 0; kk < 8; kk++) { out_b[jj * size + ii + kk] = in_b[ii * elem_size + kk * elem_size + jj]; } } } for (ii = size - size % 8; ii < size; ii ++) { for (jj = 0; jj < elem_size; jj++) { out_b[jj * size + ii] = in_b[ii * elem_size + jj]; } } } return size * elem_size; } /* Transpose bytes within elements. 
*/ int64_t bshuf_trans_byte_elem_scal(void* in, void* out, const size_t size, const size_t elem_size) { return bshuf_trans_byte_elem_remainder(in, out, size, elem_size, 0); } /* Transpose bits within bytes. */ int64_t bshuf_trans_bit_byte_remainder(void* in, void* out, const size_t size, const size_t elem_size, const size_t start_byte) { int ii, kk; uint64_t* in_b = in; uint8_t* out_b = out; uint64_t x, t; size_t nbyte = elem_size * size; size_t nbyte_bitrow = nbyte / 8; CHECK_MULT_EIGHT(nbyte); CHECK_MULT_EIGHT(start_byte); for (ii = start_byte / 8; ii < nbyte_bitrow; ii ++) { x = in_b[ii]; TRANS_BIT_8X8(x, t); for (kk = 0; kk < 8; kk ++) { out_b[kk * nbyte_bitrow + ii] = x; x = x >> 8; } } return size * elem_size; } /* Transpose bits within bytes. */ int64_t bshuf_trans_bit_byte_scal(void* in, void* out, const size_t size, const size_t elem_size) { return bshuf_trans_bit_byte_remainder(in, out, size, elem_size, 0); } /* General transpose of an array, optimized for large element sizes. */ int64_t bshuf_trans_elem(void* in, void* out, const size_t lda, const size_t ldb, const size_t elem_size) { size_t ii, jj; char* in_b = (char*) in; char* out_b = (char*) out; for(ii = 0; ii < lda; ii++) { for(jj = 0; jj < ldb; jj++) { memcpy(&out_b[(jj*lda + ii) * elem_size], &in_b[(ii*ldb + jj) * elem_size], elem_size); } } return lda * ldb * elem_size; } /* Transpose rows of shuffled bits (size / 8 bytes) within groups of 8. */ int64_t bshuf_trans_bitrow_eight(void* in, void* out, const size_t size, const size_t elem_size) { size_t nbyte_bitrow = size / 8; CHECK_MULT_EIGHT(size); return bshuf_trans_elem(in, out, 8, elem_size, nbyte_bitrow); } /* Transpose bits within elements. 
*/ int64_t bshuf_trans_bit_elem_scal(void* in, void* out, const size_t size, const size_t elem_size) { int64_t count; CHECK_MULT_EIGHT(size); void* tmp_buf = malloc(size * elem_size); if (tmp_buf == NULL) return -1; count = bshuf_trans_byte_elem_scal(in, out, size, elem_size); CHECK_ERR_FREE(count, tmp_buf); count = bshuf_trans_bit_byte_scal(out, tmp_buf, size, elem_size); CHECK_ERR_FREE(count, tmp_buf); count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size); free(tmp_buf); return count; } /* For data organized into a row for each bit (8 * elem_size rows), transpose * the bytes. */ int64_t bshuf_trans_byte_bitrow_scal(void* in, void* out, const size_t size, const size_t elem_size) { size_t ii, jj, kk; char* in_b = (char*) in; char* out_b = (char*) out; size_t nbyte_row = size / 8; CHECK_MULT_EIGHT(size); for (jj = 0; jj < elem_size; jj++) { for (ii = 0; ii < nbyte_row; ii++) { for (kk = 0; kk < 8; kk++) { out_b[ii * 8 * elem_size + jj * 8 + kk] = \ in_b[(jj * 8 + kk) * nbyte_row + ii]; } } } return size * elem_size; } /* Shuffle bits within the bytes of eight element blocks. */ int64_t bshuf_shuffle_bit_eightelem_scal(void* in, void* out, const size_t size, const size_t elem_size) { CHECK_MULT_EIGHT(size); size_t ii, jj, kk; char* in_b = (char*) in; char* out_b = (char*) out; size_t nbyte = elem_size * size; uint64_t x, t; for (jj = 0; jj < 8 * elem_size; jj += 8) { for (ii = 0; ii + 8 * elem_size - 1 < nbyte; ii += 8 * elem_size) { x = *((uint64_t*) &in_b[ii + jj]); TRANS_BIT_8X8(x, t); for (kk = 0; kk < 8; kk++) { *((uint8_t*) &out_b[ii + jj / 8 + kk * elem_size]) = x; x = x >> 8; } } } return size * elem_size; } /* Untranspose bits within elements. 
*/ int64_t bshuf_untrans_bit_elem_scal(void* in, void* out, const size_t size, const size_t elem_size) { int64_t count; CHECK_MULT_EIGHT(size); void* tmp_buf = malloc(size * elem_size); if (tmp_buf == NULL) return -1; count = bshuf_trans_byte_bitrow_scal(in, tmp_buf, size, elem_size); CHECK_ERR_FREE(count, tmp_buf); count = bshuf_shuffle_bit_eightelem_scal(tmp_buf, out, size, elem_size); free(tmp_buf); return count; } /* ---- Worker code that uses SSE2 ---- * * The following code makes use of the SSE2 instruction set and specialized * 16 byte registers. The SSE2 instructions are present on modern x86 * processors. The first Intel processor microarchitecture supporting SSE2 was * Pentium 4 (2000). * */ #ifdef USESSE2 /* Transpose bytes within elements for 16 bit elements. */ int64_t bshuf_trans_byte_elem_SSE_16(void* in, void* out, const size_t size) { size_t ii; char* in_b = (char*) in; char* out_b = (char*) out; __m128i a0, b0, a1, b1; for (ii=0; ii + 15 < size; ii += 16) { a0 = _mm_loadu_si128((__m128i *) &in_b[2*ii + 0*16]); b0 = _mm_loadu_si128((__m128i *) &in_b[2*ii + 1*16]); a1 = _mm_unpacklo_epi8(a0, b0); b1 = _mm_unpackhi_epi8(a0, b0); a0 = _mm_unpacklo_epi8(a1, b1); b0 = _mm_unpackhi_epi8(a1, b1); a1 = _mm_unpacklo_epi8(a0, b0); b1 = _mm_unpackhi_epi8(a0, b0); a0 = _mm_unpacklo_epi8(a1, b1); b0 = _mm_unpackhi_epi8(a1, b1); _mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0); _mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0); } return bshuf_trans_byte_elem_remainder(in, out, size, 2, size - size % 16); } /* Transpose bytes within elements for 32 bit elements. 
*/ int64_t bshuf_trans_byte_elem_SSE_32(void* in, void* out, const size_t size) { size_t ii; char* in_b = (char*) in; char* out_b = (char*) out; __m128i a0, b0, c0, d0, a1, b1, c1, d1; for (ii=0; ii + 15 < size; ii += 16) { a0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 0*16]); b0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 1*16]); c0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 2*16]); d0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 3*16]); a1 = _mm_unpacklo_epi8(a0, b0); b1 = _mm_unpackhi_epi8(a0, b0); c1 = _mm_unpacklo_epi8(c0, d0); d1 = _mm_unpackhi_epi8(c0, d0); a0 = _mm_unpacklo_epi8(a1, b1); b0 = _mm_unpackhi_epi8(a1, b1); c0 = _mm_unpacklo_epi8(c1, d1); d0 = _mm_unpackhi_epi8(c1, d1); a1 = _mm_unpacklo_epi8(a0, b0); b1 = _mm_unpackhi_epi8(a0, b0); c1 = _mm_unpacklo_epi8(c0, d0); d1 = _mm_unpackhi_epi8(c0, d0); a0 = _mm_unpacklo_epi64(a1, c1); b0 = _mm_unpackhi_epi64(a1, c1); c0 = _mm_unpacklo_epi64(b1, d1); d0 = _mm_unpackhi_epi64(b1, d1); _mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0); _mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0); _mm_storeu_si128((__m128i *) &out_b[2*size + ii], c0); _mm_storeu_si128((__m128i *) &out_b[3*size + ii], d0); } return bshuf_trans_byte_elem_remainder(in, out, size, 4, size - size % 16); } /* Transpose bytes within elements for 64 bit elements. 
 */
/* Processes 16 elements (128 bytes) per iteration: two byte-interleave
 * rounds, a 32-bit round and a 64-bit round regroup the eight byte planes;
 * the tail is handled by the scalar remainder routine. */
int64_t bshuf_trans_byte_elem_SSE_64(void* in, void* out, const size_t size) {

    size_t ii;
    char* in_b = (char*) in;
    char* out_b = (char*) out;
    __m128i a0, b0, c0, d0, e0, f0, g0, h0;
    __m128i a1, b1, c1, d1, e1, f1, g1, h1;

    for (ii=0; ii + 15 < size; ii += 16) {
        a0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 0*16]);
        b0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 1*16]);
        c0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 2*16]);
        d0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 3*16]);
        e0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 4*16]);
        f0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 5*16]);
        g0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 6*16]);
        h0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 7*16]);

        a1 = _mm_unpacklo_epi8(a0, b0);
        b1 = _mm_unpackhi_epi8(a0, b0);
        c1 = _mm_unpacklo_epi8(c0, d0);
        d1 = _mm_unpackhi_epi8(c0, d0);
        e1 = _mm_unpacklo_epi8(e0, f0);
        f1 = _mm_unpackhi_epi8(e0, f0);
        g1 = _mm_unpacklo_epi8(g0, h0);
        h1 = _mm_unpackhi_epi8(g0, h0);

        a0 = _mm_unpacklo_epi8(a1, b1);
        b0 = _mm_unpackhi_epi8(a1, b1);
        c0 = _mm_unpacklo_epi8(c1, d1);
        d0 = _mm_unpackhi_epi8(c1, d1);
        e0 = _mm_unpacklo_epi8(e1, f1);
        f0 = _mm_unpackhi_epi8(e1, f1);
        g0 = _mm_unpacklo_epi8(g1, h1);
        h0 = _mm_unpackhi_epi8(g1, h1);

        a1 = _mm_unpacklo_epi32(a0, c0);
        b1 = _mm_unpackhi_epi32(a0, c0);
        c1 = _mm_unpacklo_epi32(b0, d0);
        d1 = _mm_unpackhi_epi32(b0, d0);
        e1 = _mm_unpacklo_epi32(e0, g0);
        f1 = _mm_unpackhi_epi32(e0, g0);
        g1 = _mm_unpacklo_epi32(f0, h0);
        h1 = _mm_unpackhi_epi32(f0, h0);

        a0 = _mm_unpacklo_epi64(a1, e1);
        b0 = _mm_unpackhi_epi64(a1, e1);
        c0 = _mm_unpacklo_epi64(b1, f1);
        d0 = _mm_unpackhi_epi64(b1, f1);
        e0 = _mm_unpacklo_epi64(c1, g1);
        f0 = _mm_unpackhi_epi64(c1, g1);
        g0 = _mm_unpacklo_epi64(d1, h1);
        h0 = _mm_unpackhi_epi64(d1, h1);

        _mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0);
        _mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0);
        _mm_storeu_si128((__m128i *) &out_b[2*size + ii], c0);
        _mm_storeu_si128((__m128i *) &out_b[3*size + ii], d0);
        _mm_storeu_si128((__m128i *) &out_b[4*size + ii], e0);
        _mm_storeu_si128((__m128i *) &out_b[5*size + ii], f0);
        _mm_storeu_si128((__m128i *) &out_b[6*size + ii], g0);
        _mm_storeu_si128((__m128i *) &out_b[7*size + ii], h0);
    }
    return bshuf_trans_byte_elem_remainder(in, out, size, 8,
            size - size % 16);
}

/* Transpose bytes within elements using best SSE algorithm available. */
/* Dispatches on elem_size: direct kernels for 1/2/4/8-byte elements, the
 * scalar routine for odd (and odd*2) sizes, and a hierarchical
 * chunked transpose for larger multiples of 4 or 8. */
int64_t bshuf_trans_byte_elem_SSE(void* in, void* out, const size_t size,
        const size_t elem_size) {

    int64_t count;

    // Trivial cases: power of 2 bytes.
    switch (elem_size) {
        case 1:
            count = bshuf_copy(in, out, size, elem_size);
            return count;
        case 2:
            count = bshuf_trans_byte_elem_SSE_16(in, out, size);
            return count;
        case 4:
            count = bshuf_trans_byte_elem_SSE_32(in, out, size);
            return count;
        case 8:
            count = bshuf_trans_byte_elem_SSE_64(in, out, size);
            return count;
    }

    // Worst case: odd number of bytes. Turns out that this is faster for
    // (odd * 2) byte elements as well (hence % 4).
    if (elem_size % 4) {
        count = bshuf_trans_byte_elem_scal(in, out, size, elem_size);
        return count;
    }

    // Multiple of power of 2: transpose hierarchically.
    {
        size_t nchunk_elem;
        void* tmp_buf = malloc(size * elem_size);
        if (tmp_buf == NULL) return -1;

        if ((elem_size % 8) == 0) {
            nchunk_elem = elem_size / 8;
            TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int64_t);
            count = bshuf_trans_byte_elem_SSE_64(out, tmp_buf,
                    size * nchunk_elem);
            bshuf_trans_elem(tmp_buf, out, 8, nchunk_elem, size);
        } else if ((elem_size % 4) == 0) {
            nchunk_elem = elem_size / 4;
            TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int32_t);
            count = bshuf_trans_byte_elem_SSE_32(out, tmp_buf,
                    size * nchunk_elem);
            bshuf_trans_elem(tmp_buf, out, 4, nchunk_elem, size);
        } else {
            // Not used since scalar algorithm is faster.
            nchunk_elem = elem_size / 2;
            TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int16_t);
            count = bshuf_trans_byte_elem_SSE_16(out, tmp_buf,
                    size * nchunk_elem);
            bshuf_trans_elem(tmp_buf, out, 2, nchunk_elem, size);
        }

        free(tmp_buf);
        return count;
    }
}

/* Transpose bits within bytes.
*/ int64_t bshuf_trans_bit_byte_SSE(void* in, void* out, const size_t size, const size_t elem_size) { size_t ii, kk; char* in_b = (char*) in; char* out_b = (char*) out; uint16_t* out_ui16; int64_t count; size_t nbyte = elem_size * size; CHECK_MULT_EIGHT(nbyte); __m128i xmm; int32_t bt; for (ii = 0; ii + 15 < nbyte; ii += 16) { xmm = _mm_loadu_si128((__m128i *) &in_b[ii]); for (kk = 0; kk < 8; kk++) { bt = _mm_movemask_epi8(xmm); xmm = _mm_slli_epi16(xmm, 1); out_ui16 = (uint16_t*) &out_b[((7 - kk) * nbyte + ii) / 8]; *out_ui16 = bt; } } count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size, nbyte - nbyte % 16); return count; } /* Transpose bits within elements. */ int64_t bshuf_trans_bit_elem_SSE(void* in, void* out, const size_t size, const size_t elem_size) { int64_t count; CHECK_MULT_EIGHT(size); void* tmp_buf = malloc(size * elem_size); if (tmp_buf == NULL) return -1; count = bshuf_trans_byte_elem_SSE(in, out, size, elem_size); CHECK_ERR_FREE(count, tmp_buf); count = bshuf_trans_bit_byte_SSE(out, tmp_buf, size, elem_size); CHECK_ERR_FREE(count, tmp_buf); count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size); free(tmp_buf); return count; } /* For data organized into a row for each bit (8 * elem_size rows), transpose * the bytes. 
 */
/* SSE2 byte transpose of the bit-row matrix (8 * elem_size rows of
 * size / 8 bytes).  Works on 8-row by 16-column tiles; leftover columns
 * (nbyte_row % 16) are copied scalar per row group. */
int64_t bshuf_trans_byte_bitrow_SSE(void* in, void* out, const size_t size,
        const size_t elem_size) {

    size_t ii, jj;
    char* in_b = (char*) in;
    char* out_b = (char*) out;

    CHECK_MULT_EIGHT(size);

    size_t nrows = 8 * elem_size;
    size_t nbyte_row = size / 8;

    __m128i a0, b0, c0, d0, e0, f0, g0, h0;
    __m128i a1, b1, c1, d1, e1, f1, g1, h1;
    __m128 *as, *bs, *cs, *ds, *es, *fs, *gs, *hs;

    for (ii = 0; ii + 7 < nrows; ii += 8) {
        for (jj = 0; jj + 15 < nbyte_row; jj += 16) {
            a0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 0)*nbyte_row + jj]);
            b0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 1)*nbyte_row + jj]);
            c0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 2)*nbyte_row + jj]);
            d0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 3)*nbyte_row + jj]);
            e0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 4)*nbyte_row + jj]);
            f0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 5)*nbyte_row + jj]);
            g0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 6)*nbyte_row + jj]);
            h0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 7)*nbyte_row + jj]);

            a1 = _mm_unpacklo_epi8(a0, b0);
            b1 = _mm_unpacklo_epi8(c0, d0);
            c1 = _mm_unpacklo_epi8(e0, f0);
            d1 = _mm_unpacklo_epi8(g0, h0);
            e1 = _mm_unpackhi_epi8(a0, b0);
            f1 = _mm_unpackhi_epi8(c0, d0);
            g1 = _mm_unpackhi_epi8(e0, f0);
            h1 = _mm_unpackhi_epi8(g0, h0);

            a0 = _mm_unpacklo_epi16(a1, b1);
            b0 = _mm_unpacklo_epi16(c1, d1);
            c0 = _mm_unpackhi_epi16(a1, b1);
            d0 = _mm_unpackhi_epi16(c1, d1);

            e0 = _mm_unpacklo_epi16(e1, f1);
            f0 = _mm_unpacklo_epi16(g1, h1);
            g0 = _mm_unpackhi_epi16(e1, f1);
            h0 = _mm_unpackhi_epi16(g1, h1);

            a1 = _mm_unpacklo_epi32(a0, b0);
            b1 = _mm_unpackhi_epi32(a0, b0);

            c1 = _mm_unpacklo_epi32(c0, d0);
            d1 = _mm_unpackhi_epi32(c0, d0);

            e1 = _mm_unpacklo_epi32(e0, f0);
            f1 = _mm_unpackhi_epi32(e0, f0);

            g1 = _mm_unpacklo_epi32(g0, h0);
            h1 = _mm_unpackhi_epi32(g0, h0);

            // We don't have a storeh instruction for integers, so interpret
            // as a float. Have a storel (_mm_storel_epi64).
            as = (__m128 *) &a1;
            bs = (__m128 *) &b1;
            cs = (__m128 *) &c1;
            ds = (__m128 *) &d1;
            es = (__m128 *) &e1;
            fs = (__m128 *) &f1;
            gs = (__m128 *) &g1;
            hs = (__m128 *) &h1;

            _mm_storel_pi((__m64 *) &out_b[(jj + 0) * nrows + ii], *as);
            _mm_storel_pi((__m64 *) &out_b[(jj + 2) * nrows + ii], *bs);
            _mm_storel_pi((__m64 *) &out_b[(jj + 4) * nrows + ii], *cs);
            _mm_storel_pi((__m64 *) &out_b[(jj + 6) * nrows + ii], *ds);
            _mm_storel_pi((__m64 *) &out_b[(jj + 8) * nrows + ii], *es);
            _mm_storel_pi((__m64 *) &out_b[(jj + 10) * nrows + ii], *fs);
            _mm_storel_pi((__m64 *) &out_b[(jj + 12) * nrows + ii], *gs);
            _mm_storel_pi((__m64 *) &out_b[(jj + 14) * nrows + ii], *hs);

            _mm_storeh_pi((__m64 *) &out_b[(jj + 1) * nrows + ii], *as);
            _mm_storeh_pi((__m64 *) &out_b[(jj + 3) * nrows + ii], *bs);
            _mm_storeh_pi((__m64 *) &out_b[(jj + 5) * nrows + ii], *cs);
            _mm_storeh_pi((__m64 *) &out_b[(jj + 7) * nrows + ii], *ds);
            _mm_storeh_pi((__m64 *) &out_b[(jj + 9) * nrows + ii], *es);
            _mm_storeh_pi((__m64 *) &out_b[(jj + 11) * nrows + ii], *fs);
            _mm_storeh_pi((__m64 *) &out_b[(jj + 13) * nrows + ii], *gs);
            _mm_storeh_pi((__m64 *) &out_b[(jj + 15) * nrows + ii], *hs);
        }
        // Scalar tail for the last nbyte_row % 16 columns of this row group.
        for (jj = nbyte_row - nbyte_row % 16; jj < nbyte_row; jj ++) {
            out_b[jj * nrows + ii + 0] = in_b[(ii + 0)*nbyte_row + jj];
            out_b[jj * nrows + ii + 1] = in_b[(ii + 1)*nbyte_row + jj];
            out_b[jj * nrows + ii + 2] = in_b[(ii + 2)*nbyte_row + jj];
            out_b[jj * nrows + ii + 3] = in_b[(ii + 3)*nbyte_row + jj];
            out_b[jj * nrows + ii + 4] = in_b[(ii + 4)*nbyte_row + jj];
            out_b[jj * nrows + ii + 5] = in_b[(ii + 5)*nbyte_row + jj];
            out_b[jj * nrows + ii + 6] = in_b[(ii + 6)*nbyte_row + jj];
            out_b[jj * nrows + ii + 7] = in_b[(ii + 7)*nbyte_row + jj];
        }
    }
    return size * elem_size;
}

/* Shuffle bits within the bytes of eight element blocks.
 */
/* SSE2 variant; requires even elem_size (falls back to the scalar kernel
 * otherwise, whose only failure mode -- size % 8 -- is already excluded by
 * the check below, so its return value can safely be ignored). */
int64_t bshuf_shuffle_bit_eightelem_SSE(void* in, void* out, const size_t size,
        const size_t elem_size) {

    CHECK_MULT_EIGHT(size);

    // With a bit of care, this could be written such that it is
    // in_buf = out_buf safe.
    char* in_b = (char*) in;
    uint16_t* out_ui16 = (uint16_t*) out;

    size_t ii, jj, kk;
    size_t nbyte = elem_size * size;

    __m128i xmm;
    int32_t bt;

    if (elem_size % 2) {
        bshuf_shuffle_bit_eightelem_scal(in, out, size, elem_size);
    } else {
        for (ii = 0; ii + 8 * elem_size - 1 < nbyte;
                ii += 8 * elem_size) {
            for (jj = 0; jj + 15 < 8 * elem_size; jj += 16) {
                xmm = _mm_loadu_si128((__m128i *) &in_b[ii + jj]);
                for (kk = 0; kk < 8; kk++) {
                    bt = _mm_movemask_epi8(xmm);
                    xmm = _mm_slli_epi16(xmm, 1);
                    size_t ind = (ii + jj / 8 + (7 - kk) * elem_size);
                    out_ui16[ind / 2] = bt;
                }
            }
        }
    }
    return size * elem_size;
}

/* Untranspose bits within elements. */
/* SSE2 inverse bitshuffle: byte-bitrow transpose into scratch, then bit
 * unshuffle.  Returns -1 on allocation failure, -80 for unaligned size. */
int64_t bshuf_untrans_bit_elem_SSE(void* in, void* out, const size_t size,
        const size_t elem_size) {

    int64_t count;

    CHECK_MULT_EIGHT(size);

    void* tmp_buf = malloc(size * elem_size);
    if (tmp_buf == NULL) return -1;

    count = bshuf_trans_byte_bitrow_SSE(in, tmp_buf, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_shuffle_bit_eightelem_SSE(tmp_buf, out, size, elem_size);

    free(tmp_buf);

    return count;
}

#else // #ifdef USESSE2

/* Stubs returning error code -11 when compiled without SSE2 support. */

int64_t bshuf_untrans_bit_elem_SSE(void* in, void* out, const size_t size,
        const size_t elem_size) {
    return -11;
}

int64_t bshuf_trans_bit_elem_SSE(void* in, void* out, const size_t size,
        const size_t elem_size) {
    return -11;
}

int64_t bshuf_trans_byte_bitrow_SSE(void* in, void* out, const size_t size,
        const size_t elem_size) {
    return -11;
}

int64_t bshuf_trans_bit_byte_SSE(void* in, void* out, const size_t size,
        const size_t elem_size) {
    return -11;
}

int64_t bshuf_trans_byte_elem_SSE(void* in, void* out, const size_t size,
        const size_t elem_size) {
    return -11;
}

int64_t bshuf_trans_byte_elem_SSE_64(void* in, void* out, const size_t size) {
    return -11;
}

int64_t bshuf_trans_byte_elem_SSE_32(void* in, void* out, const size_t size) {
    return -11;
}

int64_t bshuf_trans_byte_elem_SSE_16(void* in, void* out, const size_t size) {
    return -11;
}

int64_t bshuf_shuffle_bit_eightelem_SSE(void* in, void* out, const size_t size,
        const size_t elem_size) {
    return -11;
}

#endif // #ifdef USESSE2


/* ---- Code that requires AVX2. Intel Haswell (2013) and later. ---- */

/* ---- Worker code that uses AVX2 ----
 *
 * The following code makes use of the AVX2 instruction set and specialized
 * 32 byte registers. The AVX2 instructions are present on newer x86
 * processors. The first Intel processor microarchitecture supporting AVX2 was
 * Haswell (2013).
 *
 */

#ifdef USEAVX2

/* Transpose bits within bytes. */
/* AVX2 bit-within-byte transpose: each movemask extracts one 32-bit bit
 * plane from 32 bytes; the tail falls back to the scalar remainder. */
int64_t bshuf_trans_bit_byte_AVX(void* in, void* out, const size_t size,
        const size_t elem_size) {

    size_t ii, kk;
    char* in_b = (char*) in;
    char* out_b = (char*) out;
    int32_t* out_i32;

    size_t nbyte = elem_size * size;

    int64_t count;

    __m256i ymm;
    int32_t bt;

    for (ii = 0; ii + 31 < nbyte; ii += 32) {
        ymm = _mm256_loadu_si256((__m256i *) &in_b[ii]);
        for (kk = 0; kk < 8; kk++) {
            bt = _mm256_movemask_epi8(ymm);
            ymm = _mm256_slli_epi16(ymm, 1);
            out_i32 = (int32_t*) &out_b[((7 - kk) * nbyte + ii) / 8];
            *out_i32 = bt;
        }
    }
    count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size,
            nbyte - nbyte % 32);
    return count;
}

/* Transpose bits within elements. */
/* AVX2 bitshuffle pipeline (byte transpose stage reuses the SSE kernel).
 * Returns -1 on allocation failure, -80 for unaligned size. */
int64_t bshuf_trans_bit_elem_AVX(void* in, void* out, const size_t size,
        const size_t elem_size) {

    int64_t count;

    CHECK_MULT_EIGHT(size);

    void* tmp_buf = malloc(size * elem_size);
    if (tmp_buf == NULL) return -1;

    count = bshuf_trans_byte_elem_SSE(in, out, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_trans_bit_byte_AVX(out, tmp_buf, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size);

    free(tmp_buf);

    return count;
}

/* For data organized into a row for each bit (8 * elem_size rows), transpose
 * the bytes.
 */
/* AVX2 byte transpose of the bit-row matrix.  Requires elem_size to be a
 * multiple of 4 (delegates to the SSE kernel otherwise).  Works on 32-row
 * by 32-column tiles staged through ymm_storeage; leftover columns are
 * copied scalar at the end. */
int64_t bshuf_trans_byte_bitrow_AVX(void* in, void* out, const size_t size,
        const size_t elem_size) {

    size_t hh, ii, jj, kk, mm;
    char* in_b = (char*) in;
    char* out_b = (char*) out;

    CHECK_MULT_EIGHT(size);

    size_t nrows = 8 * elem_size;
    size_t nbyte_row = size / 8;

    if (elem_size % 4)
        return bshuf_trans_byte_bitrow_SSE(in, out, size, elem_size);

    __m256i ymm_0[8];
    __m256i ymm_1[8];
    __m256i ymm_storeage[8][4];

    for (jj = 0; jj + 31 < nbyte_row; jj += 32) {
        for (ii = 0; ii + 3 < elem_size; ii += 4) {
            for (hh = 0; hh < 4; hh ++) {

                for (kk = 0; kk < 8; kk ++){
                    ymm_0[kk] = _mm256_loadu_si256((__m256i *) &in_b[
                            (ii * 8 + hh * 8 + kk) * nbyte_row + jj]);
                }

                for (kk = 0; kk < 4; kk ++){
                    ymm_1[kk] = _mm256_unpacklo_epi8(ymm_0[kk * 2],
                            ymm_0[kk * 2 + 1]);
                    ymm_1[kk + 4] = _mm256_unpackhi_epi8(ymm_0[kk * 2],
                            ymm_0[kk * 2 + 1]);
                }

                for (kk = 0; kk < 2; kk ++){
                    for (mm = 0; mm < 2; mm ++){
                        ymm_0[kk * 4 + mm] = _mm256_unpacklo_epi16(
                                ymm_1[kk * 4 + mm * 2],
                                ymm_1[kk * 4 + mm * 2 + 1]);
                        ymm_0[kk * 4 + mm + 2] = _mm256_unpackhi_epi16(
                                ymm_1[kk * 4 + mm * 2],
                                ymm_1[kk * 4 + mm * 2 + 1]);
                    }
                }

                for (kk = 0; kk < 4; kk ++){
                    ymm_1[kk * 2] = _mm256_unpacklo_epi32(ymm_0[kk * 2],
                            ymm_0[kk * 2 + 1]);
                    ymm_1[kk * 2 + 1] = _mm256_unpackhi_epi32(ymm_0[kk * 2],
                            ymm_0[kk * 2 + 1]);
                }

                for (kk = 0; kk < 8; kk ++){
                    ymm_storeage[kk][hh] = ymm_1[kk];
                }
            }

            for (mm = 0; mm < 8; mm ++) {

                for (kk = 0; kk < 4; kk ++){
                    ymm_0[kk] = ymm_storeage[mm][kk];
                }

                ymm_1[0] = _mm256_unpacklo_epi64(ymm_0[0], ymm_0[1]);
                ymm_1[1] = _mm256_unpacklo_epi64(ymm_0[2], ymm_0[3]);
                ymm_1[2] = _mm256_unpackhi_epi64(ymm_0[0], ymm_0[1]);
                ymm_1[3] = _mm256_unpackhi_epi64(ymm_0[2], ymm_0[3]);

                // permute2x128 with controls 32/49 merges the low/high
                // 128-bit lanes of each pair.
                ymm_0[0] = _mm256_permute2x128_si256(ymm_1[0], ymm_1[1], 32);
                ymm_0[1] = _mm256_permute2x128_si256(ymm_1[2], ymm_1[3], 32);
                ymm_0[2] = _mm256_permute2x128_si256(ymm_1[0], ymm_1[1], 49);
                ymm_0[3] = _mm256_permute2x128_si256(ymm_1[2], ymm_1[3], 49);

                _mm256_storeu_si256((__m256i *) &out_b[
                        (jj + mm * 2 + 0 * 16) * nrows + ii * 8], ymm_0[0]);
                _mm256_storeu_si256((__m256i *) &out_b[
                        (jj + mm * 2 + 0 * 16 + 1) * nrows + ii * 8], ymm_0[1]);
                _mm256_storeu_si256((__m256i *) &out_b[
                        (jj + mm * 2 + 1 * 16) * nrows + ii * 8], ymm_0[2]);
                _mm256_storeu_si256((__m256i *) &out_b[
                        (jj + mm * 2 + 1 * 16 + 1) * nrows + ii * 8], ymm_0[3]);
            }
        }
    }
    // Scalar tail for the last nbyte_row % 32 columns.
    for (ii = 0; ii < nrows; ii ++ ) {
        for (jj = nbyte_row - nbyte_row % 32; jj < nbyte_row; jj ++) {
            out_b[jj * nrows + ii] = in_b[ii * nbyte_row + jj];
        }
    }
    return size * elem_size;
}

/* Shuffle bits within the bytes of eight element blocks. */
/* AVX2 variant; requires elem_size to be a multiple of 4 (delegates to the
 * SSE kernel otherwise). */
int64_t bshuf_shuffle_bit_eightelem_AVX(void* in, void* out, const size_t size,
        const size_t elem_size) {

    CHECK_MULT_EIGHT(size);

    // With a bit of care, this could be written such that it is
    // in_buf = out_buf safe.
    char* in_b = (char*) in;
    char* out_b = (char*) out;

    size_t ii, jj, kk;
    size_t nbyte = elem_size * size;

    __m256i ymm;
    int32_t bt;

    if (elem_size % 4) {
        return bshuf_shuffle_bit_eightelem_SSE(in, out, size, elem_size);
    } else {
        for (jj = 0; jj + 31 < 8 * elem_size; jj += 32) {
            for (ii = 0; ii + 8 * elem_size - 1 < nbyte;
                    ii += 8 * elem_size) {
                ymm = _mm256_loadu_si256((__m256i *) &in_b[ii + jj]);
                for (kk = 0; kk < 8; kk++) {
                    bt = _mm256_movemask_epi8(ymm);
                    ymm = _mm256_slli_epi16(ymm, 1);
                    size_t ind = (ii + jj / 8 + (7 - kk) * elem_size);
                    * (int32_t *) &out_b[ind] = bt;
                }
            }
        }
    }
    return size * elem_size;
}

/* Untranspose bits within elements.
 */
int64_t bshuf_untrans_bit_elem_AVX(void* in, void* out, const size_t size,
         const size_t elem_size) {

    int64_t count;

    CHECK_MULT_EIGHT(size);

    void* tmp_buf = malloc(size * elem_size);
    if (tmp_buf == NULL) return -1;

    /* Inverse of the shuffle: byte-untranspose the bit rows, then restore
     * the bit order within each eight-element block. */
    count = bshuf_trans_byte_bitrow_AVX(in, tmp_buf, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_shuffle_bit_eightelem_AVX(tmp_buf, out, size, elem_size);

    free(tmp_buf);

    return count;
}

#else // #ifdef USEAVX2

/* Stubs used when AVX2 support is not compiled in: each returns -12 so
 * callers can report the missing instruction set. */

int64_t bshuf_trans_bit_byte_AVX(void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -12;
}

int64_t bshuf_trans_bit_elem_AVX(void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -12;
}

int64_t bshuf_trans_byte_bitrow_AVX(void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -12;
}

int64_t bshuf_shuffle_bit_eightelem_AVX(void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -12;
}

int64_t bshuf_untrans_bit_elem_AVX(void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -12;
}

#endif // #ifdef USEAVX2


/* ---- Drivers selecting best instruction set at compile time. ---- */

/* Transpose bits within elements using the best available kernel
 * (AVX2 > SSE2 > scalar), chosen at compile time. */
int64_t bshuf_trans_bit_elem(void* in, void* out, const size_t size,
        const size_t elem_size) {

    int64_t count;
#ifdef USEAVX2
    count = bshuf_trans_bit_elem_AVX(in, out, size, elem_size);
#elif defined(USESSE2)
    count = bshuf_trans_bit_elem_SSE(in, out, size, elem_size);
#else
    count = bshuf_trans_bit_elem_scal(in, out, size, elem_size);
#endif
    return count;
}

/* Untranspose bits within elements using the best available kernel. */
int64_t bshuf_untrans_bit_elem(void* in, void* out, const size_t size,
        const size_t elem_size) {

    int64_t count;
#ifdef USEAVX2
    count = bshuf_untrans_bit_elem_AVX(in, out, size, elem_size);
#elif defined(USESSE2)
    count = bshuf_untrans_bit_elem_SSE(in, out, size, elem_size);
#else
    count = bshuf_untrans_bit_elem_scal(in, out, size, elem_size);
#endif
    return count;
}


/* ---- Wrappers for implementing blocking ---- */

/* Function definition for worker functions that process a single block.
*/ typedef int64_t (*bshufBlockFunDef)(ioc_chain* C_ptr, const size_t size, const size_t elem_size); /* Wrap a function for processing a single block to process an entire buffer in * parallel. */ int64_t bshuf_blocked_wrap_fun(bshufBlockFunDef fun, void* in, void* out, const size_t size, const size_t elem_size, size_t block_size) { size_t ii; ioc_chain C; ioc_init(&C, in, out); int64_t err = 0, count, cum_count = 0; size_t last_block_size; if (block_size == 0) { block_size = bshuf_default_block_size(elem_size); } if (block_size < 0 || block_size % BSHUF_BLOCKED_MULT) return -81; #pragma omp parallel for private(count) reduction(+ : cum_count) for (ii = 0; ii < size / block_size; ii ++) { count = fun(&C, block_size, elem_size); if (count < 0) err = count; cum_count += count; } last_block_size = size % block_size; last_block_size = last_block_size - last_block_size % BSHUF_BLOCKED_MULT; if (last_block_size) { count = fun(&C, last_block_size, elem_size); if (count < 0) err = count; cum_count += count; } if (err < 0) return err; size_t leftover_bytes = size % BSHUF_BLOCKED_MULT * elem_size; size_t this_iter; char *last_in = (char *) ioc_get_in(&C, &this_iter); ioc_set_next_in(&C, &this_iter, (void *) (last_in + leftover_bytes)); char *last_out = (char *) ioc_get_out(&C, &this_iter); ioc_set_next_out(&C, &this_iter, (void *) (last_out + leftover_bytes)); memcpy(last_out, last_in, leftover_bytes); ioc_destroy(&C); return cum_count + leftover_bytes; } /* Bitshuffle a single block. */ int64_t bshuf_bitshuffle_block(ioc_chain *C_ptr, const size_t size, const size_t elem_size) { size_t this_iter; void *in = ioc_get_in(C_ptr, &this_iter); ioc_set_next_in(C_ptr, &this_iter, (void*) ((char*) in + size * elem_size)); void *out = ioc_get_out(C_ptr, &this_iter); ioc_set_next_out(C_ptr, &this_iter, (void *) ((char *) out + size * elem_size)); int64_t count = bshuf_trans_bit_elem(in, out, size, elem_size); return count; } /* Bitunshuffle a single block. 
 */
int64_t bshuf_bitunshuffle_block(ioc_chain* C_ptr, const size_t size,
        const size_t elem_size) {

    size_t this_iter;
    /* Claim the next in/out slices of the chain, advancing both cursors by
     * one block's worth of bytes. */
    void *in = ioc_get_in(C_ptr, &this_iter);
    ioc_set_next_in(C_ptr, &this_iter,
            (void*) ((char*) in + size * elem_size));
    void *out = ioc_get_out(C_ptr, &this_iter);
    ioc_set_next_out(C_ptr, &this_iter,
            (void *) ((char *) out + size * elem_size));
    int64_t count = bshuf_untrans_bit_elem(in, out, size, elem_size);
    return count;
}


/* Write a 64 bit unsigned integer to a buffer in big endian order. */
void bshuf_write_uint64_BE(void* buf, uint64_t num) {
    int ii;
    uint8_t* b = buf;
    uint64_t pow28 = 1 << 8;    /* 2^8: one byte's worth per iteration */
    for (ii = 7; ii >= 0; ii--) {
        b[ii] = num % pow28;    /* least significant byte goes last (BE) */
        num = num / pow28;
    }
}


/* Read a 64 bit unsigned integer from a buffer big endian order. */
uint64_t bshuf_read_uint64_BE(void* buf) {
    int ii;
    uint8_t* b = buf;
    uint64_t num = 0, pow28 = 1 << 8, cp = 1;
    /* Walk from the last (least significant) byte up, scaling by 256. */
    for (ii = 7; ii >= 0; ii--) {
        num += b[ii] * cp;
        cp *= pow28;
    }
    return num;
}


/* Write a 32 bit unsigned integer to a buffer in big endian order. */
void bshuf_write_uint32_BE(void* buf, uint32_t num) {
    int ii;
    uint8_t* b = buf;
    uint32_t pow28 = 1 << 8;
    for (ii = 3; ii >= 0; ii--) {
        b[ii] = num % pow28;
        num = num / pow28;
    }
}


/* Read a 32 bit unsigned integer from a buffer big endian order. */
uint32_t bshuf_read_uint32_BE(void* buf) {
    int ii;
    uint8_t* b = buf;
    uint32_t num = 0, pow28 = 1 << 8, cp = 1;
    for (ii = 3; ii >= 0; ii--) {
        num += b[ii] * cp;
        cp *= pow28;
    }
    return num;
}


/* Bitshuffle and compress a single block.
 */
int64_t bshuf_compress_lz4_block(ioc_chain *C_ptr, const size_t size,
        const size_t elem_size) {

    int64_t nbytes, count;

    /* Scratch for the bitshuffled block and for the LZ4 output (sized by
     * LZ4's worst-case bound). */
    void* tmp_buf_bshuf = malloc(size * elem_size);
    if (tmp_buf_bshuf == NULL) return -1;

    void* tmp_buf_lz4 = malloc(LZ4_compressBound(size * elem_size));
    if (tmp_buf_lz4 == NULL){
        free(tmp_buf_bshuf);
        return -1;
    }

    size_t this_iter;
    void *in = ioc_get_in(C_ptr, &this_iter);
    ioc_set_next_in(C_ptr, &this_iter,
            (void*) ((char*) in + size * elem_size));

    count = bshuf_trans_bit_elem(in, tmp_buf_bshuf, size, elem_size);
    if (count < 0) {
        free(tmp_buf_lz4);
        free(tmp_buf_bshuf);
        return count;
    }

    nbytes = LZ4_compress(tmp_buf_bshuf, tmp_buf_lz4, size * elem_size);
    free(tmp_buf_bshuf);
    CHECK_ERR_FREE_LZ(nbytes, tmp_buf_lz4);

    /* The output slice is only claimed after the compressed size is known:
     * each block is stored as a 4-byte big-endian length header followed by
     * the LZ4 payload. */
    void *out = ioc_get_out(C_ptr, &this_iter);
    ioc_set_next_out(C_ptr, &this_iter, (void *) ((char *) out + nbytes + 4));

    bshuf_write_uint32_BE(out, nbytes);
    memcpy((char *) out + 4, tmp_buf_lz4, nbytes);

    free(tmp_buf_lz4);

    return nbytes + 4;
}


/* Decompress and bitunshuffle a single block.
*/ int64_t bshuf_decompress_lz4_block(ioc_chain *C_ptr, const size_t size, const size_t elem_size) { int64_t nbytes, count; size_t this_iter; void *in = ioc_get_in(C_ptr, &this_iter); int32_t nbytes_from_header = bshuf_read_uint32_BE(in); ioc_set_next_in(C_ptr, &this_iter, (void*) ((char*) in + nbytes_from_header + 4)); void *out = ioc_get_out(C_ptr, &this_iter); ioc_set_next_out(C_ptr, &this_iter, (void *) ((char *) out + size * elem_size)); void* tmp_buf = malloc(size * elem_size); if (tmp_buf == NULL) return -1; #ifdef BSHUF_LZ4_DECOMPRESS_FAST nbytes = LZ4_decompress_fast((char*) in + 4, tmp_buf, size * elem_size); CHECK_ERR_FREE_LZ(nbytes, tmp_buf); if (nbytes != nbytes_from_header) { free(tmp_buf); return -91; } #else nbytes = LZ4_decompress_safe((char*) in + 4, tmp_buf, nbytes_from_header, size * elem_size); CHECK_ERR_FREE_LZ(nbytes, tmp_buf); if (nbytes != size * elem_size) { free(tmp_buf); return -91; } nbytes = nbytes_from_header; #endif count = bshuf_untrans_bit_elem(tmp_buf, out, size, elem_size); CHECK_ERR_FREE(count, tmp_buf); nbytes += 4; free(tmp_buf); return nbytes; } /* ---- Public functions ---- * * See header file for description and usage. * */ size_t bshuf_default_block_size(const size_t elem_size) { // This function needs to be absolutely stable between versions. // Otherwise encoded data will not be decodable. size_t block_size = BSHUF_TARGET_BLOCK_SIZE_B / elem_size; // Ensure it is a required multiple. block_size = (block_size / BSHUF_BLOCKED_MULT) * BSHUF_BLOCKED_MULT; return MAX(block_size, BSHUF_MIN_RECOMMEND_BLOCK); } size_t bshuf_compress_lz4_bound(const size_t size, const size_t elem_size, size_t block_size) { size_t bound, leftover; if (block_size == 0) { block_size = bshuf_default_block_size(elem_size); } if (block_size < 0 || block_size % BSHUF_BLOCKED_MULT) return -81; // Note that each block gets a 4 byte header. // Size of full blocks. 
bound = (LZ4_compressBound(block_size * elem_size) + 4) * (size / block_size); // Size of partial blocks, if any. leftover = ((size % block_size) / BSHUF_BLOCKED_MULT) * BSHUF_BLOCKED_MULT; if (leftover) bound += LZ4_compressBound(leftover * elem_size) + 4; // Size of uncompressed data not fitting into any blocks. bound += (size % BSHUF_BLOCKED_MULT) * elem_size; return bound; } int64_t bshuf_bitshuffle(void* in, void* out, const size_t size, const size_t elem_size, size_t block_size) { return bshuf_blocked_wrap_fun(&bshuf_bitshuffle_block, in, out, size, elem_size, block_size); } int64_t bshuf_bitunshuffle(void* in, void* out, const size_t size, const size_t elem_size, size_t block_size) { return bshuf_blocked_wrap_fun(&bshuf_bitunshuffle_block, in, out, size, elem_size, block_size); } int64_t bshuf_compress_lz4(void* in, void* out, const size_t size, const size_t elem_size, size_t block_size) { return bshuf_blocked_wrap_fun(&bshuf_compress_lz4_block, in, out, size, elem_size, block_size); } int64_t bshuf_decompress_lz4(void* in, void* out, const size_t size, const size_t elem_size, size_t block_size) { return bshuf_blocked_wrap_fun(&bshuf_decompress_lz4_block, in, out, size, elem_size, block_size); } #undef TRANS_BIT_8X8 #undef TRANS_ELEM_TYPE #undef MIN #undef MAX #undef CHECK_MULT_EIGHT #undef CHECK_ERR #undef CHECK_ERR_FREE #undef CHECK_ERR_FREE_LZ #undef USESSE2 #undef USEAVX2
/* ==== Begin file: Parallel-Impl.h ==== */
/************************************************************************* > File Name: Parallel-Impl.h > Project Name: CubbyFlow > This code is based on Jet Framework that was created by Doyub Kim. > References: https://github.com/doyubkim/fluid-engine-dev > Purpose: Parallel functions for CubbyFlow. > Created Time: 2017/02/05 > Copyright (c) 2018, Chan-Ho Chris Ohk *************************************************************************/ #ifndef CUBBYFLOW_PARALLEL_IMPL_H #define CUBBYFLOW_PARALLEL_IMPL_H #include <Core/Utils/Constants.h> #include <Core/Utils/Parallel.h> #if defined(CUBBYFLOW_TASKING_HPX) #include <hpx/include/future.hpp> #include <hpx/include/parallel_fill.hpp> #include <hpx/include/parallel_for_each.hpp> #include <hpx/include/parallel_for_loop.hpp> #include <hpx/include/parallel_reduce.hpp> #include <hpx/include/parallel_sort.hpp> #endif #if defined(CUBBYFLOW_TASKING_TBB) #include <tbb/parallel_for.h> #include <tbb/parallel_reduce.h> #include <tbb/parallel_sort.h> #include <tbb/task.h> #elif defined(CUBBYFLOW_TASKING_CPP11THREAD) #include <thread> #endif #include <algorithm> #include <cmath> #include <future> #include <vector> #undef max #undef min namespace CubbyFlow { namespace Internal { #if defined(CUBBYFLOW_TASKING_HPX) template <typename Task> using future = hpx::future<Task>; #else template <typename Task> using future = std::future<Task>; #endif template <typename TASK> using operator_return_t = typename std::result_of<TASK()>::type; template <typename TASK> inline auto Async(TASK&& fn) -> future<operator_return_t<TASK>> { #if defined(CUBBYFLOW_TASKING_HPX) return hpx::async(std::forward<TASK>(fn)); #elif defined(CUBBYFLOW_TASKING_TBB) struct LocalTBBTask : public tbb::task { TASK func; LocalTBBTask(TASK&& f) : func(std::forward<TASK>(f)) { // Do nothing } tbb::task* execute() override { func(); return nullptr; } }; using package_t = std::packaged_task<operator_return_t<TASK>()>; auto task = new package_t(std::forward<TASK>(fn)); auto* 
tbbNode = new (tbb::task::allocate_root()) LocalTBBTask([=]() { (*task)(); delete task; }); tbb::task::enqueue(*tbbNode); return task.get_future(); #elif defined(CUBBYFLOW_TASKING_CPP11THREAD) return std::async(std::launch::async, fn); #else return std::async(std::launch::deferred, fn); #endif } // Adopted from: // Radenski, A. // Shared Memory, Message Passing, and Hybrid Merge Sorts for Standalone and // Clustered SMPs. Proc PDPTA'11, the 2011 International Conference on Parallel // and Distributed Processing Techniques and Applications, CSREA Press // (H. Arabnia, Ed.), 2011, pp. 367 - 373. template <typename RandomIterator, typename RandomIterator2, typename CompareFunction> void Merge(RandomIterator a, size_t size, RandomIterator2 temp, CompareFunction compareFunction) { size_t i1 = 0; size_t i2 = size / 2; size_t tempi = 0; while (i1 < size / 2 && i2 < size) { if (compareFunction(a[i1], a[i2])) { temp[tempi] = a[i1]; i1++; } else { temp[tempi] = a[i2]; i2++; } tempi++; } while (i1 < size / 2) { temp[tempi] = a[i1]; i1++; tempi++; } while (i2 < size) { temp[tempi] = a[i2]; i2++; tempi++; } // Copy sorted temp array into main array, a ParallelFor(ZERO_SIZE, size, [&](size_t i) { a[i] = temp[i]; }); } template <typename RandomIterator, typename RandomIterator2, typename CompareFunction> void ParallelMergeSort(RandomIterator a, size_t size, RandomIterator2 temp, unsigned int numThreads, CompareFunction compareFunction) { if (numThreads == 1) { std::sort(a, a + size, compareFunction); } else if (numThreads > 1) { std::vector<future<void>> pool; pool.reserve(2); auto launchRange = [compareFunction](RandomIterator begin, size_t k2, RandomIterator2 temp, unsigned int numThreads) { ParallelMergeSort(begin, k2, temp, numThreads, compareFunction); }; pool.emplace_back(Internal::Async([=]() { launchRange(a, size / 2, temp, numThreads / 2); })); pool.emplace_back(Internal::Async([=]() { launchRange(a + size / 2, size - size / 2, temp + size / 2, numThreads - numThreads / 
2); })); // Wait for jobs to finish for (auto& f : pool) { if (f.valid()) { f.wait(); } } Merge(a, size, temp, compareFunction); } } } // namespace Internal template <typename RandomIterator, typename T> void ParallelFill(const RandomIterator& begin, const RandomIterator& end, const T& value, ExecutionPolicy policy) { auto diff = end - begin; if (diff <= 0) { return; } #if defined(CUBBYFLOW_TASKING_HPX) hpx::parallel::fill(hpx::parallel::execution::par, begin, end, value); #else size_t size = static_cast<size_t>(diff); ParallelFor(ZERO_SIZE, size, [begin, value](size_t i) { begin[i] = value; }, policy); #endif } // Adopted from http://ideone.com/Z7zldb template <typename IndexType, typename Function> void ParallelFor(IndexType beginIndex, IndexType endIndex, const Function& function, ExecutionPolicy policy) { if (beginIndex > endIndex) { return; } if (policy == ExecutionPolicy::Parallel) { #if defined(CUBBYFLOW_TASKING_TBB) (void)policy; tbb::parallel_for(beginIndex, endIndex, function); #elif defined(CUBBYFLOW_TASKING_HPX) (void)policy; hpx::parallel::for_loop(hpx::parallel::execution::par, beginIndex, endIndex, function); #elif defined(CUBBYFLOW_TASKING_CPP11THREAD) // Estimate number of threads in the pool const unsigned int numThreadsHint = GetMaxNumberOfThreads(); const unsigned int numThreads = (numThreadsHint == 0u) ? 
8u : numThreadsHint; // Size of a slice for the range functions IndexType n = endIndex - beginIndex + 1; IndexType slice = static_cast<IndexType>(std::round(n / static_cast<double>(numThreads))); slice = std::max(slice, IndexType(1)); // [Helper] Inner loop auto launchRange = [&function](IndexType k1, IndexType k2) { for (IndexType k = k1; k < k2; ++k) { function(k); } }; // Create pool and launch jobs std::vector<std::thread> pool; pool.reserve(numThreads); IndexType i1 = beginIndex; IndexType i2 = std::min(beginIndex + slice, endIndex); for (unsigned int i = 0; i + 1 < numThreads && i1 < endIndex; ++i) { pool.emplace_back(launchRange, i1, i2); i1 = i2; i2 = std::min(i2 + slice, endIndex); } if (i1 < endIndex) { pool.emplace_back(launchRange, i1, endIndex); } // Wait for jobs to finish for (std::thread& t : pool) { if (t.joinable()) { t.join(); } } #else (void)policy; #if defined(CUBBYFLOW_TASKING_OPENMP) #pragma omp parallel for #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) for (ssize_t i = beginIndex; i < static_cast<ssize_t>(endIndex); ++i) { #else // !MSVC || Intel for (auto i = beginIndex; i < endIndex; ++i) { #endif // MSVC && !Intel function(i); } #else // CUBBYFLOW_TASKING_SERIAL for (auto i = beginIndex; i < endIndex; ++i) { function(i); } #endif // CUBBYFLOW_TASKING_OPENMP #endif } else { for (auto i = beginIndex; i < endIndex; ++i) { function(i); } } } template <typename IndexType, typename Function> void ParallelRangeFor(IndexType beginIndex, IndexType endIndex, const Function& function, ExecutionPolicy policy) { if (beginIndex > endIndex) { return; } if (policy == ExecutionPolicy::Parallel) { #if defined(CUBBYFLOW_TASKING_TBB) tbb::parallel_for( tbb::blocked_range<IndexType>(beginIndex, endIndex), [&function](const tbb::blocked_range<IndexType>& range) { function(range.begin(), range.end()); }); #else // Estimate number of threads in the pool const unsigned int numThreadsHint = GetMaxNumberOfThreads(); const unsigned int numThreads = 
numThreadsHint == 0u ? 8u : numThreadsHint; // Size of a slice for the range functions IndexType n = endIndex - beginIndex + 1; IndexType slice = static_cast<IndexType>( std::round(n / static_cast<double>(numThreads))); slice = std::max(slice, IndexType(1)); // Create pool and launch jobs std::vector<CubbyFlow::Internal::future<void>> pool; pool.reserve(numThreads); IndexType i1 = beginIndex; IndexType i2 = std::min(beginIndex + slice, endIndex); for (unsigned int i = 0; i + 1 < numThreads && i1 < endIndex; ++i) { pool.emplace_back(Internal::Async([=]() { function(i1, i2); })); i1 = i2; i2 = std::min(i2 + slice, endIndex); } if (i1 < endIndex) { pool.emplace_back( Internal::Async([=]() { function(i1, endIndex); })); } // Wait for jobs to finish for (auto& f : pool) { if (f.valid()) { f.wait(); } } #endif } else { function(beginIndex, endIndex); } } template <typename IndexType, typename Function> void ParallelFor( IndexType beginIndexX, IndexType endIndexX, IndexType beginIndexY, IndexType endIndexY, const Function& function, ExecutionPolicy policy) { ParallelFor(beginIndexY, endIndexY, [&](IndexType j) { for (IndexType i = beginIndexX; i < endIndexX; ++i) { function(i, j); } }, policy); } template <typename IndexType, typename Function> void ParallelRangeFor( IndexType beginIndexX, IndexType endIndexX, IndexType beginIndexY, IndexType endIndexY, const Function& function, ExecutionPolicy policy) { ParallelRangeFor(beginIndexY, endIndexY, [&](IndexType jBegin, IndexType jEnd) { function(beginIndexX, endIndexX, jBegin, jEnd); }, policy); } template <typename IndexType, typename Function> void ParallelFor( IndexType beginIndexX, IndexType endIndexX, IndexType beginIndexY, IndexType endIndexY, IndexType beginIndexZ, IndexType endIndexZ, const Function& function, ExecutionPolicy policy) { ParallelFor(beginIndexZ, endIndexZ, [&](IndexType k) { for (IndexType j = beginIndexY; j < endIndexY; ++j) { for (IndexType i = beginIndexX; i < endIndexX; ++i) { function(i, j, k); } 
} }, policy); } template <typename IndexType, typename Function> void ParallelRangeFor( IndexType beginIndexX, IndexType endIndexX, IndexType beginIndexY, IndexType endIndexY, IndexType beginIndexZ, IndexType endIndexZ, const Function& function, ExecutionPolicy policy) { ParallelRangeFor(beginIndexZ, endIndexZ, [&](IndexType kBegin, IndexType kEnd) { function(beginIndexX, endIndexX, beginIndexY, endIndexY, kBegin, kEnd); }, policy); } template <typename IndexType, typename Value, typename Function, typename Reduce> Value ParallelReduce(IndexType beginIndex, IndexType endIndex, const Value& identity, const Function& function, const Reduce& reduce, ExecutionPolicy policy) { if (beginIndex > endIndex) { return identity; } if (policy == ExecutionPolicy::Parallel) { #if defined(CUBBYFLOW_TASKING_TBB) return tbb::parallel_reduce( tbb::blocked_range<IndexType>(beginIndex, endIndex), identity, [&function](const tbb::blocked_range<IndexType>& range, const Value& init) { return function(range.begin(), range.end(), init); }, reduce); #else // Estimate number of threads in the pool const unsigned int numThreadsHint = GetMaxNumberOfThreads(); const unsigned int numThreads = (numThreadsHint == 0u) ? 
8u : numThreadsHint; // Size of a slice for the range functions IndexType n = endIndex - beginIndex + 1; IndexType slice = static_cast<IndexType>(std::round(n / static_cast<double>(numThreads))); slice = std::max(slice, IndexType(1)); // Results std::vector<Value> results(numThreads, identity); // [Helper] Inner loop auto launchRange = [&](IndexType k1, IndexType k2, unsigned int tid) { results[tid] = function(k1, k2, identity); }; // Create pool and launch jobs std::vector<CubbyFlow::Internal::future<void>> pool; pool.reserve(numThreads); IndexType i1 = beginIndex; IndexType i2 = std::min(beginIndex + slice, endIndex); unsigned int threadID = 0; for (; threadID + 1 < numThreads && i1 < endIndex; ++threadID) { pool.emplace_back(Internal::Async([=]() { launchRange(i1, i2, threadID); })); i1 = i2; i2 = std::min(i2 + slice, endIndex); } if (i1 < endIndex) { pool.emplace_back(Internal::Async([=]() { launchRange(i1, endIndex, threadID); })); } // Wait for jobs to finish for (auto& f : pool) { if (f.valid()) { f.wait(); } } // Gather Value finalResult = identity; for (const Value& val : results) { finalResult = reduce(val, finalResult); } return finalResult; #endif } (void)reduce; return function(beginIndex, endIndex, identity); } template <typename RandomIterator> void ParallelSort(RandomIterator begin, RandomIterator end, ExecutionPolicy policy) { ParallelSort(begin, end, std::less<typename std::iterator_traits<RandomIterator>::value_type>(), policy); } template <typename RandomIterator, typename CompareFunction> void ParallelSort(RandomIterator begin, RandomIterator end, CompareFunction compareFunction, ExecutionPolicy policy) { if (begin > end) { return; } if (policy == ExecutionPolicy::Parallel) { #if defined(CUBBYFLOW_TASKING_HPX) hpx::parallel::sort(hpx::parallel::execution::par, begin, end, compareFunction); #elif defined(CUBBYFLOW_TASKING_TBB) tbb::parallel_sort(begin, end, compareFunction); #else size_t size = static_cast<size_t>(end - begin); using value_type 
= typename std::iterator_traits<RandomIterator>::value_type; std::vector<value_type> temp(size); // Estimate number of threads in the pool const unsigned int numThreadsHint = GetMaxNumberOfThreads(); const unsigned int numThreads = (numThreadsHint == 0u) ? 8u : numThreadsHint; Internal::ParallelMergeSort(begin, size, temp.begin(), numThreads, compareFunction); #endif } else { std::sort(begin, end, compareFunction); } } } // namespace CubbyFlow #endif
/* ==== Begin file: main.c (OpenMP sum) ==== */
/* Parallel sum of N user-supplied numbers with OpenMP.
 *
 * When N is not divisible by the number of threads t, the first (N % t)
 * threads each take one extra element; later threads shift their slice by
 * the remainder (translated from the original Italian note).
 *
 * Fixes over the original: `t`, `r` and `t1` were shared variables written
 * by every thread inside the parallel region (data races); all partitioning
 * state is now private, and the timer is read after the region.  Also adds
 * input/allocation checking, frees the array, and removes a stray `;;`. */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>

int main()
{
    int N = 0, i;
    float sumtot = 0.0f, *a;
    double t0, t1;    /* omp_get_wtime() returns double */

    printf("Inserire N: ");
    if (scanf("%d", &N) != 1 || N <= 0) {
        fprintf(stderr, "Invalid N\n");
        return EXIT_FAILURE;
    }

    /* Dynamically allocated, zero-initialized input array. */
    a = calloc((size_t)N, sizeof *a);
    if (a == NULL) {
        fprintf(stderr, "Allocation failure\n");
        return EXIT_FAILURE;
    }

    printf("Inserire i numeri da sommare: \n");
    for (i = 0; i < N; i++) {
        if (scanf("%f", &a[i]) != 1) {
            fprintf(stderr, "Invalid input\n");
            free(a);
            return EXIT_FAILURE;
        }
    }

    t0 = omp_get_wtime();

    #pragma omp parallel reduction(+:sumtot)
    {
        /* Every thread computes its own partition parameters; nothing here
         * is shared, so there are no races. */
        int t = omp_get_num_threads();
        int id = omp_get_thread_num();
        int nloc = N / t;       /* base slice size */
        int r = N % t;          /* leftover elements */
        int step;               /* offset correction for later threads */
        float sum = 0.0f;
        int k;

        if (id < r) {
            /* The first r threads absorb one extra element each. */
            nloc++;
            step = 0;
        } else {
            step = r;
        }

        printf("Sono il thread con id %d, di %d: numeri %d, resto = %d, step = %d\n",
               id, t, nloc, r, step);

        for (k = 0; k < nloc; k++) {
            sum = sum + a[k + nloc * id + step];
        }

        printf("Sono il thread con id %d, di %d: numeri %d, resto = %d, sum = %f\n",
               id, t, nloc, r, sum);

        sumtot += sum;
    }

    t1 = omp_get_wtime();   /* read the timer once, outside the region */

    printf("La somma totale e': %f\n", sumtot);
    printf("Tempo totale: %f \n", t1 - t0);

    free(a);
    return 0;
}
/* ==== Begin file: main.c (SPF/OpenMP region example) ==== */
/* Fill A[0..N-1] so that A[i] = i + (0 + 1 + 2 + 3): each element receives
 * its own index plus the sum of a small lookup table holding 0..3.  The
 * table initialization and the parallel fill keep their original SPF/OpenMP
 * region annotations. */
void foo(int N, int *A) {
  int tabLen = 4;
  int tab[4];
#pragma spf region name(ignore)
  for (int idx = 0; idx < tabLen; ++idx)
    tab[idx] = idx;
#pragma spf region name(parallel)
#pragma omp parallel default(shared)
  {
#pragma omp for
    for (int i = 0; i < N; ++i) {
      /* Accumulate locally, then store once. */
      int acc = i;
      for (int k = 0; k < tabLen; ++k)
        acc += tab[k];
      A[i] = acc;
    }
  }
}
/* ==== Begin file: omp_for_collapse.c ==== */
// RUN: %libomp-compile-and-run // REQUIRES: !abt #include <stdio.h> #include <math.h> #include "omp_testsuite.h" /* Utility function to check that i is increasing monotonically with each call */ static int check_i_islarger (int i) { static int last_i; int islarger; if (i==1) last_i=0; islarger = ((i >= last_i)&&(i - last_i<=1)); last_i = i; return (islarger); } int test_omp_for_collapse() { int is_larger = 1; #pragma omp parallel { int i,j; int my_islarger = 1; #pragma omp for private(i,j) schedule(static,1) collapse(2) ordered for (i = 1; i < 100; i++) { for (j =1; j <100; j++) { #pragma omp ordered my_islarger = check_i_islarger(i)&&my_islarger; } } #pragma omp critical is_larger = is_larger && my_islarger; } return (is_larger); } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_for_collapse()) { num_failed++; } } return num_failed; }
/* ==== Begin file: GB_unaryop__identity_int32_uint64.c ==== */
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int32_uint64
// op(A') function:  GB_tran__identity_int32_uint64

// C type:   int32_t
// A type:   uint64_t
// cast:     int32_t cij = (int32_t) aij
// unaryop:  cij = aij

// A's entry type
#define GB_ATYPE \
    uint64_t

// C's entry type
#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: pass the value through)
#define GB_OP(z, x) \
    z = x ;

// casting (uint64_t -> int32_t, truncating)
#define GB_CASTING(z, aij) \
    int32_t z = (int32_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;\
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;  \
    GB_OP (GB_CX (pC), z) ;\
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise cast-and-copy over anz entries, parallelized with a static
// OpenMP schedule.  Returns GrB_NO_VALUE when the operator is disabled.
GrB_Info GB_unop__identity_int32_uint64
(
    int32_t *Cx,        // Cx and Ax may be aliased
    uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose body is generated by textual inclusion of
// GB_unaryop_transpose.c, specialized by the GB_* macros defined above.
GrB_Info GB_tran__identity_int32_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ==== Begin file: 3d25pt.lbpar.c ==== */
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 8; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution 
- Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=2*Nt-2;t1++) { lbp=ceild(t1+2,2); ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1,4),ceild(4*t2-Nz+5,8));t3<=min(min(floord(4*Nt+Ny-9,8),floord(2*t1+Ny-3,8)),floord(4*t2+Ny-9,8));t3++) { for (t4=max(max(ceild(t1-60,64),ceild(4*t2-Nz-115,128)),ceild(8*t3-Ny-115,128));t4<=min(min(min(floord(4*Nt+Nx-9,128),floord(2*t1+Nx-3,128)),floord(4*t2+Nx-9,128)),floord(8*t3+Nx-5,128));t4++) { for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(128*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) { for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) { lbv=max(128*t4,4*t5+4); ubv=min(128*t4+127,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ 
(-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
phylokernelnonrev.h
/*
 * phylokernelnonrev.h
 * Kernel based on vectorizing over alignment patterns for non-reversible models
 *
 *  Created on: Nov 4, 2016
 *      Author: minh
 */

// This header is included twice: once with KERNEL_FIX_STATES defined (fixed
// compile-time number of states) and once without (runtime nstates), hence
// the dual include guard below.
#if !defined(PHYLOKERNELNONREV_H_) || !defined(PHYLOKERNELNONREV_STATE_H_)

#ifdef KERNEL_FIX_STATES
#define PHYLOKERNELNONREV_STATE_H_
#else
#define PHYLOKERNELNONREV_H_
#endif

#include "phylotree.h"

#ifdef _OPENMP
#include <omp.h>
#endif

//#include <thread>

using namespace std;

// Computes the partial likelihood vector of dad_branch for site patterns in
// [ptn_lower, ptn_upper), vectorized over patterns with VectorClass.
// Dispatches on node topology: multifurcating, tip-tip (cherry),
// root-internal, tip-internal, and internal-internal. Results are written to
// dad_branch->partial_lh; per-pattern scaling counts go to
// dad_branch->scale_num to prevent numerical underflow.
#ifdef KERNEL_FIX_STATES
template <class VectorClass, const int nstates, const bool FMA>
void PhyloTree::computeNonrevPartialLikelihoodSIMD(TraversalInfo &info, size_t ptn_lower, size_t ptn_upper, int thread_id) {
#else
template <class VectorClass, const bool FMA>
void PhyloTree::computeNonrevPartialLikelihoodGenericSIMD(TraversalInfo &info, size_t ptn_lower, size_t ptn_upper, int thread_id) {
#endif

    PhyloNeighbor *dad_branch = info.dad_branch;
    PhyloNode *dad = info.dad;
    ASSERT(dad);
    PhyloNode *node = (PhyloNode*)(dad_branch->node);
//    assert(dad_branch->direction != UNDEFINED_DIRECTION);

#ifndef KERNEL_FIX_STATES
    size_t nstates = aln->num_states;
#endif

    // leaves have no partial likelihood to compute
    if (node->isLeaf()) {
        return;
    }

    ASSERT(node->degree() >= 3);

    size_t ptn, c;
    size_t orig_nptn = aln->size();
    // pattern count rounded up to a multiple of the SIMD vector width
    size_t max_orig_nptn = ((orig_nptn+VectorClass::size()-1)/VectorClass::size())*VectorClass::size();
    size_t nptn = max_orig_nptn+model_factory->unobserved_ptns.size();
    size_t ncat = site_rate->getNRate();
    size_t ncat_mix = (model_factory->fused_mix_rate) ? ncat : ncat*model->getNMixtures();
    size_t i, x;
    // entries per pattern: one likelihood per state per category/mixture
    size_t block = nstates * ncat_mix;

    // internal node
    PhyloNeighbor *left = NULL, *right = NULL; // left & right are two neighbors leading to 2 subtrees
    FOR_NEIGHBOR_IT(node, dad, it) {
        if (!left) left = (PhyloNeighbor*)(*it); else right = (PhyloNeighbor*)(*it);
    }

    // precomputed buffer to save times: per-thread scratch carved from the
    // tail of the shared buffer_partial_lh area
    double *buffer_partial_lh_ptr = buffer_partial_lh + (getBufferPartialLhSize() - 2*block*VectorClass::size()*num_threads);
    double *echildren = info.echildren;
    double *partial_lh_leaves = info.partial_lh_leaves;

    // per-child transition matrices, laid out consecutively
    double *eleft = echildren, *eright = echildren + block*nstates;

    // normalize so that a leaf child (if any) is always "left"
    if ((!left->node->isLeaf() && right->node->isLeaf())) {
        PhyloNeighbor *tmp = left;
        left = right;
        right = tmp;
        double *etmp = eleft;
        eleft = eright;
        eright = etmp;
    }

    if (node->degree() > 3) {

        /*--------------------- multifurcating node ------------------*/
        double *vec_tip = buffer_partial_lh_ptr + (block*2)*VectorClass::size()*thread_id;
        VectorClass *vtip = (VectorClass*)vec_tip;

        // now for-loop computing partial_lh over all site-patterns
        for (ptn = ptn_lower; ptn < ptn_upper; ptn+=VectorClass::size()) {
            VectorClass *partial_lh_all = (VectorClass*)(dad_branch->partial_lh + ptn*block);
            // start from all-ones and multiply in each child's contribution
            for (i = 0; i < block; i++)
                partial_lh_all[i] = 1.0;
            memset(&dad_branch->scale_num[ptn], 0, sizeof(UBYTE)*VectorClass::size());

            double *partial_lh_leaf = partial_lh_leaves;
            double *echild = echildren;

            FOR_NEIGHBOR_IT(node, dad, it) {
                PhyloNeighbor *child = (PhyloNeighbor*)*it;
                if (child->node->isLeaf()) {
                    // external node
                    // load data for tip: gather the per-state precomputed leaf
                    // likelihoods into SoA (vector-friendly) layout
                    for (x = 0; x < VectorClass::size(); x++) {
                        double *tip_child;
                        if (isRootLeaf(child->node))
                            tip_child = partial_lh_leaf;
                        else if (ptn+x < orig_nptn)
                            tip_child = partial_lh_leaf + block * (aln->at(ptn+x))[child->node->id];
                        else if (ptn+x < max_orig_nptn)
                            tip_child = partial_lh_leaf + block * aln->STATE_UNKNOWN; // padding pattern
                        else if (ptn+x < nptn)
                            tip_child = partial_lh_leaf + block * model_factory->unobserved_ptns[ptn+x-max_orig_nptn];
                        else
                            tip_child = partial_lh_leaf + block * aln->STATE_UNKNOWN;
                        double *this_vec_tip = vec_tip+x;
                        for (i = 0; i < block; i++) {
                            *this_vec_tip = tip_child[i];
                            this_vec_tip += VectorClass::size();
                        }
                    }
                    for (c = 0; c < block; c++) {
                        // compute real partial likelihood vector
                        partial_lh_all[c] *= vtip[c];
                    }
                    partial_lh_leaf += (aln->STATE_UNKNOWN+1)*block;
                } else {
                    // internal node
                    VectorClass *partial_lh = partial_lh_all;
                    VectorClass *partial_lh_child = (VectorClass*)(child->partial_lh + ptn*block);
                    // accumulate scaling counts from this child
                    for (i = 0; i < VectorClass::size(); i++)
                        dad_branch->scale_num[ptn+i] += child->scale_num[ptn+i];

                    double *echild_ptr = echild;
                    for (c = 0; c < ncat_mix; c++) {
                        // compute real partial likelihood vector
                        for (x = 0; x < nstates; x++) {
                            VectorClass vchild;
//                            for (i = 0; i < nstates; i++) {
//                                vchild += echild_ptr[i] * partial_lh_child[i];
//                            }
#ifdef KERNEL_FIX_STATES
                            dotProductVec<VectorClass, double, nstates, FMA>(echild_ptr, partial_lh_child, vchild);
#else
                            dotProductVec<VectorClass, double, FMA>(echild_ptr, partial_lh_child, vchild, nstates);
#endif
                            echild_ptr += nstates;
                            partial_lh[x] *= vchild;
                        }
                        partial_lh += nstates;
                        partial_lh_child += nstates;
                    }
                } // if
                echild += block*nstates;
            } // FOR_NEIGHBOR

            VectorClass lh_max = partial_lh_all[0];
            for (i = 1; i < block; i++)
                lh_max = max(lh_max, partial_lh_all[i]);
            // check if one should scale partial likelihoods
            auto underflown = (lh_max < SCALING_THRESHOLD);
            if (horizontal_or(underflown)) {
                // now do the likelihood scaling
                for (x = 0; x < VectorClass::size(); x++)
                    if (underflown[x]) {
                        // rescale only the underflown lane of each vector
                        double *partial_lh = dad_branch->partial_lh + (ptn*block + x);
                        // now do the likelihood scaling
                        for (i = 0; i < block; i++) {
                            partial_lh[i*VectorClass::size()] = ldexp(partial_lh[i*VectorClass::size()], SCALING_THRESHOLD_EXP);
                        }
                        dad_branch->scale_num[ptn+x] += 1;
                    }
            }
        } // for ptn
        // end multifurcating treatment
    } else if (left->node->isLeaf() && right->node->isLeaf()) {

        /*--------------------- TIP-TIP (cherry) case ------------------*/

        double *partial_lh_left = partial_lh_leaves;
        double *partial_lh_right = partial_lh_leaves + (aln->STATE_UNKNOWN+1)*block;
        double *vec_left = buffer_partial_lh_ptr + (block*2)*VectorClass::size()*thread_id;
        double *vec_right = &vec_left[block*VectorClass::size()];

        if (isRootLeaf(right->node)) {
            // swap so that left node is the root
            PhyloNeighbor *tmp = left;
            left = right;
            right = tmp;
            double *etmp = eleft;
            eleft = eright;
            eright = etmp;
            etmp = partial_lh_left;
            partial_lh_left = partial_lh_right;
            partial_lh_right = etmp;
        }
        // scale number must be ZERO
        memset(dad_branch->scale_num + ptn_lower, 0, (ptn_upper-ptn_lower) * sizeof(UBYTE));

        if (isRootLeaf(left->node)) {
            // root-tip cherry: left contribution is the root frequencies,
            // only the right tip needs the gather step
            for (ptn = ptn_lower; ptn < ptn_upper; ptn+=VectorClass::size()) {
                double *vright = dad_branch->partial_lh + ptn*block;
                VectorClass *partial_lh = (VectorClass*)vright;
                // load data for tip
                for (x = 0; x < VectorClass::size(); x++) {
                    double *tip_right;
                    if (ptn+x < orig_nptn)
                        tip_right = partial_lh_right + block * (aln->at(ptn+x))[right->node->id];
                    else if (ptn+x < max_orig_nptn)
                        tip_right = partial_lh_right + block * aln->STATE_UNKNOWN;
                    else if (ptn+x < nptn)
                        tip_right = partial_lh_right + block * model_factory->unobserved_ptns[ptn+x-max_orig_nptn];
                    else
                        tip_right = partial_lh_right + block * aln->STATE_UNKNOWN;
                    double *this_vec_right = vright+x;
                    for (i = 0; i < block; i++) {
                        *this_vec_right = tip_right[i];
                        this_vec_right += VectorClass::size();
                    }
                }
                for (i = 0; i < block; i++)
                    partial_lh[i] *= partial_lh_left[i];
            }
        } else
        for (ptn = ptn_lower; ptn < ptn_upper; ptn+=VectorClass::size()) {
            VectorClass *partial_lh = (VectorClass*)(dad_branch->partial_lh + ptn*block);
            VectorClass *vleft = (VectorClass*)vec_left;
            VectorClass *vright = (VectorClass*)vec_right;
            // load data for tip
            for (x = 0; x < VectorClass::size(); x++) {
                double *tip_left, *tip_right;
                if (ptn+x < orig_nptn) {
                    tip_left  = partial_lh_left  + block * (aln->at(ptn+x))[left->node->id];
                    tip_right = partial_lh_right + block * (aln->at(ptn+x))[right->node->id];
                } else if (ptn+x < max_orig_nptn) {
                    tip_left  = partial_lh_left  + block * aln->STATE_UNKNOWN;
                    tip_right = partial_lh_right + block * aln->STATE_UNKNOWN;
                } else if (ptn+x < nptn) {
                    tip_left  = partial_lh_left  + block * model_factory->unobserved_ptns[ptn+x-max_orig_nptn];
                    tip_right = partial_lh_right + block * model_factory->unobserved_ptns[ptn+x-max_orig_nptn];
                } else {
                    tip_left  = partial_lh_left  + block * aln->STATE_UNKNOWN;
                    tip_right = partial_lh_right + block * aln->STATE_UNKNOWN;
                }
                double *this_vec_left = vec_left+x;
                double *this_vec_right = vec_right+x;
                for (i = 0; i < block; i++) {
                    *this_vec_left = tip_left[i];
                    *this_vec_right = tip_right[i];
                    this_vec_left += VectorClass::size();
                    this_vec_right += VectorClass::size();
                }
            }
            for (i = 0; i < block; i++)
                partial_lh[i] = vleft[i] * vright[i];
        }
    } else if (isRootLeaf(left->node) && !right->node->isLeaf()) {
        // left is root node
        /*--------------------- ROOT-INTERNAL NODE case ------------------*/
        // only take scale_num from the right subtree
        memcpy(dad_branch->scale_num + ptn_lower, right->scale_num + ptn_lower, (ptn_upper-ptn_lower) * sizeof(UBYTE));

        double *partial_lh_left = partial_lh_leaves;

        for (ptn = ptn_lower; ptn < ptn_upper; ptn+=VectorClass::size()) {
            VectorClass *partial_lh = (VectorClass*)(dad_branch->partial_lh + ptn*block);
            VectorClass *partial_lh_right = (VectorClass*)(right->partial_lh + ptn*block);
            double *eright_ptr = eright;
            double *lh_left = partial_lh_left;
            for (c = 0; c < ncat_mix; c++) {
                // compute real partial likelihood vector
                for (x = 0; x < nstates; x++) {
                    VectorClass vright;
#ifdef KERNEL_FIX_STATES
                    dotProductVec<VectorClass, double, nstates, FMA>(eright_ptr, partial_lh_right, vright);
#else
                    dotProductVec<VectorClass, double, FMA>(eright_ptr, partial_lh_right, vright, nstates);
#endif
                    eright_ptr += nstates;
                    partial_lh[x] = lh_left[x]*vright;
                }
                partial_lh_right += nstates;
                lh_left += nstates;
                partial_lh += nstates;
            }
        }
    } else if (left->node->isLeaf() && !right->node->isLeaf()) {

        /*--------------------- TIP-INTERNAL NODE case ------------------*/

        // only take scale_num from the right subtree
        memcpy(dad_branch->scale_num + ptn_lower, right->scale_num + ptn_lower, (ptn_upper-ptn_lower) * sizeof(UBYTE));

        double *partial_lh_left = partial_lh_leaves;
        double *vec_left = buffer_partial_lh_ptr + (block*2)*VectorClass::size()*thread_id;

        for (ptn = ptn_lower; ptn < ptn_upper; ptn+=VectorClass::size()) {
            VectorClass *partial_lh = (VectorClass*)(dad_branch->partial_lh + ptn*block);
            VectorClass *partial_lh_right = (VectorClass*)(right->partial_lh + ptn*block);
            VectorClass *vleft = (VectorClass*)vec_left;
            // load data for tip
            for (x = 0; x < VectorClass::size(); x++) {
                double *tip;
                if (ptn+x < orig_nptn)
                    tip = partial_lh_left + block*(aln->at(ptn+x))[left->node->id];
                else if (ptn+x < max_orig_nptn)
                    tip = partial_lh_left + block*aln->STATE_UNKNOWN;
                else if (ptn+x < nptn)
                    tip = partial_lh_left + block*model_factory->unobserved_ptns[ptn+x-max_orig_nptn];
                else
                    tip = partial_lh_left + block*aln->STATE_UNKNOWN;
                double *this_vec_left = vec_left+x;
                for (i = 0; i < block; i++) {
                    *this_vec_left = tip[i];
                    this_vec_left += VectorClass::size();
                }
            }

            VectorClass lh_max = 0.0;
            double *eright_ptr = eright;
            for (c = 0; c < ncat_mix; c++) {
                // compute real partial likelihood vector
                for (x = 0; x < nstates; x++) {
                    VectorClass vright;
#ifdef KERNEL_FIX_STATES
                    dotProductVec<VectorClass, double, nstates, FMA>(eright_ptr, partial_lh_right, vright);
#else
                    dotProductVec<VectorClass, double, FMA>(eright_ptr, partial_lh_right, vright, nstates);
#endif
                    eright_ptr += nstates;
                    lh_max = max(lh_max, (partial_lh[x] = vleft[x]*vright));
                }
                vleft += nstates;
                partial_lh_right += nstates;
                partial_lh += nstates;
            }
            // check if one should scale partial likelihoods
            auto underflown = (lh_max < SCALING_THRESHOLD);
            if (horizontal_or(underflown)) {
                // now do the likelihood scaling
                for (x = 0; x < VectorClass::size(); x++)
                    if (underflown[x]) {
                        double *partial_lh = dad_branch->partial_lh + (ptn*block + x);
                        // now do the likelihood scaling
                        for (i = 0; i < block; i++) {
                            partial_lh[i*VectorClass::size()] = ldexp(partial_lh[i*VectorClass::size()], SCALING_THRESHOLD_EXP);
                        }
                        dad_branch->scale_num[ptn+x] += 1;
                    }
            }
        }
    } else {

        /*--------------------- INTERNAL-INTERNAL NODE case ------------------*/

        for (ptn = ptn_lower; ptn < ptn_upper; ptn+=VectorClass::size()) {
            VectorClass *partial_lh = (VectorClass*)(dad_branch->partial_lh + ptn*block);
            VectorClass *partial_lh_left = (VectorClass*)(left->partial_lh + ptn*block);
            VectorClass *partial_lh_right = (VectorClass*)(right->partial_lh + ptn*block);
            VectorClass lh_max = 0.0;
            // combine scaling counts of both subtrees
            for (i = 0; i < VectorClass::size(); i++)
                dad_branch->scale_num[ptn+i] = left->scale_num[ptn+i] + right->scale_num[ptn+i];

            double *eleft_ptr = eleft;
            double *eright_ptr = eright;

            for (c = 0; c < ncat_mix; c++) {
                // compute real partial likelihood vector
                for (x = 0; x < nstates; x++) {
#ifdef KERNEL_FIX_STATES
                    dotProductDualVec<VectorClass, double, nstates, FMA>(eleft_ptr, partial_lh_left, eright_ptr, partial_lh_right, partial_lh[x]);
#else
                    dotProductDualVec<VectorClass, double, FMA>(eleft_ptr, partial_lh_left, eright_ptr, partial_lh_right, partial_lh[x], nstates);
#endif
                    eleft_ptr += nstates;
                    eright_ptr += nstates;
                    lh_max=max(lh_max, partial_lh[x]);
                }
                partial_lh_left += nstates;
                partial_lh_right += nstates;
                partial_lh += nstates;
            }

            // check if one should scale partial likelihoods
            auto underflown = (lh_max < SCALING_THRESHOLD);
            if (horizontal_or(underflown)) {
                // now do the likelihood scaling
                for (x = 0; x < VectorClass::size(); x++)
                    if (underflown[x]) {
                        double *partial_lh = dad_branch->partial_lh + (ptn*block + x);
                        // now do the likelihood scaling
                        for (i = 0; i < block; i++) {
                            partial_lh[i*VectorClass::size()] = ldexp(partial_lh[i*VectorClass::size()], SCALING_THRESHOLD_EXP);
                        }
                        dad_branch->scale_num[ptn+x] += 1;
                    }
            }
        }
    }
}

// Computes first (*df) and second (*ddf) derivatives of the log-likelihood
// with respect to the length of dad_branch (continued on the next line).
#ifdef KERNEL_FIX_STATES
template <class VectorClass, const int nstates, const bool FMA>
void PhyloTree::computeNonrevLikelihoodDervSIMD(PhyloNeighbor
*dad_branch, PhyloNode *dad, double *df, double *ddf) { #else template <class VectorClass, const bool FMA> void PhyloTree::computeNonrevLikelihoodDervGenericSIMD(PhyloNeighbor *dad_branch, PhyloNode *dad, double *df, double *ddf) { #endif // assert(rooted); PhyloNode *node = (PhyloNode*) dad_branch->node; PhyloNeighbor *node_branch = (PhyloNeighbor*) node->findNeighbor(dad); if (!central_partial_lh) initializeAllPartialLh(); if (node->isLeaf() || (dad_branch->direction == AWAYFROM_ROOT && !isRootLeaf(dad))) { PhyloNode *tmp_node = dad; dad = node; node = tmp_node; PhyloNeighbor *tmp_nei = dad_branch; dad_branch = node_branch; node_branch = tmp_nei; } #ifdef KERNEL_FIX_STATES computeTraversalInfo<VectorClass, nstates>(node, dad, false); #else computeTraversalInfo<VectorClass>(node, dad, false); #endif #ifndef KERNEL_FIX_STATES size_t nstates = aln->num_states; #endif size_t nstatesqr = nstates*nstates; size_t ncat = site_rate->getNRate(); size_t ncat_mix = (model_factory->fused_mix_rate) ? ncat : ncat*model->getNMixtures(); size_t denom = (model_factory->fused_mix_rate) ? 
1 : ncat; size_t block = ncat_mix * nstates; size_t ptn; // for big data size > 4GB memory required size_t c, i; size_t orig_nptn = aln->size(); size_t max_orig_nptn = ((orig_nptn+VectorClass::size()-1)/VectorClass::size())*VectorClass::size(); size_t nptn = max_orig_nptn+model_factory->unobserved_ptns.size(); bool isASC = model_factory->unobserved_ptns.size() > 0; // double *trans_mat = new double[block*nstates*3]; double *trans_mat = buffer_partial_lh; double *trans_derv1 = buffer_partial_lh + block*nstates; double *trans_derv2 = trans_derv1 + block*nstates; double *buffer_partial_lh_ptr = buffer_partial_lh + get_safe_upper_limit(3*block*nstates); for (c = 0; c < ncat_mix; c++) { size_t mycat = c%ncat; size_t m = c/denom; double cat_rate = site_rate->getRate(mycat); double len = cat_rate * dad_branch->length; double prop = site_rate->getProp(mycat) * model->getMixtureWeight(m); double *this_trans_mat = &trans_mat[c*nstatesqr]; double *this_trans_derv1 = &trans_derv1[c*nstatesqr]; double *this_trans_derv2 = &trans_derv2[c*nstatesqr]; model->computeTransDerv(len, this_trans_mat, this_trans_derv1, this_trans_derv2, m); double prop_rate = prop * cat_rate; double prop_rate_2 = prop_rate * cat_rate; for (i = 0; i < nstatesqr; i++) { this_trans_mat[i] *= prop; this_trans_derv1[i] *= prop_rate; this_trans_derv2[i] *= prop_rate_2; } if (!rooted) { // for unrooted tree, multiply with state_freq double state_freq[nstates]; model->getStateFrequency(state_freq, m); for (i = 0; i < nstates; i++) { for (size_t x = 0; x < nstates; x++) { this_trans_mat[x] *= state_freq[i]; this_trans_derv1[x] *= state_freq[i]; this_trans_derv2[x] *= state_freq[i]; } this_trans_mat += nstates; this_trans_derv1 += nstates; this_trans_derv2 += nstates; } } } VectorClass all_df(0.0), all_ddf(0.0); VectorClass all_prob_const(0.0), all_df_const(0.0), all_ddf_const(0.0); vector<size_t> limits; computeBounds<VectorClass>(num_threads, nptn, limits); // double *buffer_partial_lh_ptr = buffer_partial_lh; 
if (dad->isLeaf()) { // make sure that we do not estimate the virtual branch length from the root ASSERT(!isRootLeaf(dad)); // special treatment for TIP-INTERNAL NODE case // double *partial_lh_node = new double[(aln->STATE_UNKNOWN+1)*block*3]; double *partial_lh_node = buffer_partial_lh_ptr; double *partial_lh_derv1 = partial_lh_node + (aln->STATE_UNKNOWN+1)*block; double *partial_lh_derv2 = partial_lh_derv1 + (aln->STATE_UNKNOWN+1)*block; buffer_partial_lh_ptr += get_safe_upper_limit((aln->STATE_UNKNOWN+1)*block*3); IntVector states_dad = aln->seq_states[dad->id]; states_dad.push_back(aln->STATE_UNKNOWN); // precompute information from one tip for (IntVector::iterator it = states_dad.begin(); it != states_dad.end(); it++) { double *lh_node = partial_lh_node +(*it)*block; double *lh_derv1 = partial_lh_derv1 +(*it)*block; double *lh_derv2 = partial_lh_derv2 +(*it)*block; double *lh_tip = tip_partial_lh + (*it)*nstates; double *trans_mat_tmp = trans_mat; double *trans_derv1_tmp = trans_derv1; double *trans_derv2_tmp = trans_derv2; for (c = 0; c < ncat_mix; c++) { for (i = 0; i < nstates; i++) { lh_node[i] = 0.0; lh_derv1[i] = 0.0; lh_derv2[i] = 0.0; for (size_t x = 0; x < nstates; x++) { lh_node[i] += trans_mat_tmp[x] * lh_tip[x]; lh_derv1[i] += trans_derv1_tmp[x] * lh_tip[x]; lh_derv2[i] += trans_derv2_tmp[x] * lh_tip[x]; } trans_mat_tmp += nstates; trans_derv1_tmp += nstates; trans_derv2_tmp += nstates; } lh_node += nstates; lh_derv1 += nstates; lh_derv2 += nstates; } } // now do the real computation #ifdef _OPENMP #pragma omp parallel for private(ptn, i, c) schedule(static,1) num_threads(num_threads) #endif for (int thread_id = 0; thread_id < num_threads; thread_id++) { VectorClass my_df(0.0), my_ddf(0.0), vc_prob_const(0.0), vc_df_const(0.0), vc_ddf_const(0.0); size_t ptn_lower = limits[thread_id]; size_t ptn_upper = limits[thread_id+1]; // first compute partial_lh for (vector<TraversalInfo>::iterator it = traversal_info.begin(); it != traversal_info.end(); 
it++) computePartialLikelihood(*it, ptn_lower, ptn_upper, thread_id); double *vec_tip = buffer_partial_lh_ptr + block*3*VectorClass::size()*thread_id; for (ptn = ptn_lower; ptn < ptn_upper; ptn+=VectorClass::size()) { VectorClass lh_ptn, df_ptn, ddf_ptn; VectorClass *partial_lh_dad = (VectorClass*)(dad_branch->partial_lh + ptn*block); //load tip vector for (i = 0; i < VectorClass::size(); i++) { size_t state_dad; if (ptn+i < orig_nptn) state_dad = block * (aln->at(ptn+i))[dad->id]; else if (ptn+i < max_orig_nptn) state_dad = block * aln->STATE_UNKNOWN; else if (ptn+i < nptn) state_dad = block * model_factory->unobserved_ptns[ptn+i-max_orig_nptn]; else state_dad = block * aln->STATE_UNKNOWN; double *lh_tip = partial_lh_node + state_dad; double *lh_derv1 = partial_lh_derv1 + state_dad; double *lh_derv2 = partial_lh_derv2 + state_dad; double *this_vec_tip = vec_tip+i; double *this_derv1 = this_vec_tip + block*VectorClass::size(); double *this_derv2 = this_derv1 + block*VectorClass::size(); for (c = 0; c < block; c++) { *this_vec_tip = lh_tip[c]; *this_derv1 = lh_derv1[c]; *this_derv2 = lh_derv2[c]; this_vec_tip += VectorClass::size(); this_derv1 += VectorClass::size(); this_derv2 += VectorClass::size(); } } VectorClass *lh_node = (VectorClass*)vec_tip; VectorClass *lh_derv1 = (VectorClass*)vec_tip + block; VectorClass *lh_derv2 = (VectorClass*)lh_derv1 + block; #ifdef KERNEL_FIX_STATES dotProductTriple<VectorClass, VectorClass, nstates, FMA, false>(lh_node, lh_derv1, lh_derv2, partial_lh_dad, lh_ptn, df_ptn, ddf_ptn, block); #else dotProductTriple<VectorClass, VectorClass, FMA, false>(lh_node, lh_derv1, lh_derv2, partial_lh_dad, lh_ptn, df_ptn, ddf_ptn, block, nstates); #endif lh_ptn = (lh_ptn + VectorClass().load_a(&ptn_invar[ptn])); if (ptn < orig_nptn) { lh_ptn = 1.0 / lh_ptn; VectorClass df_frac = df_ptn * lh_ptn; VectorClass ddf_frac = ddf_ptn * lh_ptn; VectorClass freq; freq.load_a(&ptn_freq[ptn]); VectorClass tmp1 = df_frac * freq; VectorClass tmp2 = ddf_frac * 
freq; my_df += tmp1; my_ddf += nmul_add(tmp1, df_frac, tmp2); } else { if (ptn+VectorClass::size() > nptn) { // cutoff the last entries if going beyond lh_ptn.cutoff(nptn-ptn); df_ptn.cutoff(nptn-ptn); ddf_ptn.cutoff(nptn-ptn); } // bugfix 2016-01-21, prob_const can be rescaled double *lh_ptn_ptr = (double*)&lh_ptn; for (i = 0; i < VectorClass::size(); i++) if (dad_branch->scale_num[ptn+i] >= 1) lh_ptn_ptr[i] *= SCALING_THRESHOLD; vc_prob_const += lh_ptn; vc_df_const += df_ptn; vc_ddf_const += ddf_ptn; } } // FOR ptn #ifdef _OPENMP #pragma omp critical #endif { all_df += my_df; all_ddf += my_ddf; if (isASC) { all_prob_const += vc_prob_const; all_df_const += vc_df_const; all_ddf_const += vc_ddf_const; } } } // FOR thread_id // delete [] partial_lh_node; } else { // both dad and node are internal nodes #ifdef _OPENMP #pragma omp parallel for private(ptn, i, c) schedule(static,1) num_threads(num_threads) #endif for (int thread_id = 0; thread_id < num_threads; thread_id++) { VectorClass my_df(0.0), my_ddf(0.0), vc_prob_const(0.0), vc_df_const(0.0), vc_ddf_const(0.0); size_t ptn_lower = limits[thread_id]; size_t ptn_upper = limits[thread_id+1]; // first compute partial_lh for (vector<TraversalInfo>::iterator it = traversal_info.begin(); it != traversal_info.end(); it++) computePartialLikelihood(*it, ptn_lower, ptn_upper, thread_id); for (ptn = ptn_lower; ptn < ptn_upper; ptn+=VectorClass::size()) { VectorClass lh_ptn(0.0), df_ptn(0.0), ddf_ptn(0.0); VectorClass *partial_lh_dad = (VectorClass*)(dad_branch->partial_lh + ptn*block); VectorClass *partial_lh_node = (VectorClass*)(node_branch->partial_lh + ptn*block); double *trans_mat_tmp = trans_mat; double *trans_derv1_tmp = trans_derv1; double *trans_derv2_tmp = trans_derv2; for (c = 0; c < ncat_mix; c++) { for (i = 0; i < nstates; i++) { VectorClass lh_state; VectorClass lh_derv1; VectorClass lh_derv2; #ifdef KERNEL_FIX_STATES dotProductTriple<VectorClass, double, nstates, FMA, false>(trans_mat_tmp, trans_derv1_tmp, 
trans_derv2_tmp, partial_lh_node, lh_state, lh_derv1, lh_derv2, nstates); #else dotProductTriple<VectorClass, double, FMA, false>(trans_mat_tmp, trans_derv1_tmp, trans_derv2_tmp, partial_lh_node, lh_state, lh_derv1, lh_derv2, nstates, nstates); #endif lh_ptn = mul_add(partial_lh_dad[i], lh_state, lh_ptn); df_ptn = mul_add(partial_lh_dad[i], lh_derv1, df_ptn); ddf_ptn = mul_add(partial_lh_dad[i], lh_derv2, ddf_ptn); trans_mat_tmp += nstates; trans_derv1_tmp += nstates; trans_derv2_tmp += nstates; } partial_lh_node += nstates; partial_lh_dad += nstates; } lh_ptn = (lh_ptn + VectorClass().load_a(&ptn_invar[ptn])); if (ptn < orig_nptn) { lh_ptn = 1.0 / lh_ptn; VectorClass df_frac = df_ptn * lh_ptn; VectorClass ddf_frac = ddf_ptn * lh_ptn; VectorClass freq; freq.load_a(&ptn_freq[ptn]); VectorClass tmp1 = df_frac * freq; VectorClass tmp2 = ddf_frac * freq; my_df += tmp1; my_ddf += nmul_add(tmp1, df_frac, tmp2); } else { if (ptn+VectorClass::size() > nptn) { // cutoff the last entries if going beyond lh_ptn.cutoff(nptn-ptn); df_ptn.cutoff(nptn-ptn); ddf_ptn.cutoff(nptn-ptn); } // bugfix 2016-01-21, prob_const can be rescaled double *lh_ptn_ptr = (double*)&lh_ptn; for (i = 0; i < VectorClass::size(); i++) if (dad_branch->scale_num[ptn+i] >= 1) lh_ptn_ptr[i] *= SCALING_THRESHOLD; vc_prob_const += lh_ptn; vc_df_const += df_ptn; vc_ddf_const += ddf_ptn; } } // FOR ptn #ifdef _OPENMP #pragma omp critical #endif { all_df += my_df; all_ddf += my_ddf; if (isASC) { all_prob_const += vc_prob_const; all_df_const += vc_df_const; all_ddf_const += vc_ddf_const; } } } // FOR thread } *df = horizontal_add(all_df); *ddf = horizontal_add(all_ddf); ASSERT(std::isfinite(*df) && "Numerical underflow for non-rev lh-derivative"); if (isASC) { double prob_const = 0.0, df_const = 0.0, ddf_const = 0.0; prob_const = horizontal_add(all_prob_const); df_const = horizontal_add(all_df_const); ddf_const = horizontal_add(all_ddf_const); // ascertainment bias correction prob_const = 1.0 - prob_const; 
    // --- tail of the preceding derivative routine (its head lies before this
    // chunk): fold the ascertainment-bias correction into *df and *ddf ---
    double df_frac = df_const / prob_const;
    double ddf_frac = ddf_const / prob_const;
    int nsites = aln->getNSite();
    *df += nsites * df_frac;
    *ddf += nsites *(ddf_frac + df_frac*df_frac);
    }
}

/*
 * Compute the tree log-likelihood contribution of the branch between dad and
 * dad_branch->node for a non-reversible model, SIMD-vectorized over site
 * patterns (VectorClass lanes) and OpenMP-threaded over pattern ranges.
 * Per-pattern log-likelihoods are stored into _pattern_lh (and per-category
 * values into _pattern_lh_cat); if unobserved patterns are present the
 * ascertainment-bias correction is applied before returning.
 * KERNEL_FIX_STATES selects the compile-time-nstates specialization.
 */
#ifdef KERNEL_FIX_STATES
template <class VectorClass, const int nstates, const bool FMA>
double PhyloTree::computeNonrevLikelihoodBranchSIMD(PhyloNeighbor *dad_branch, PhyloNode *dad) {
#else
template <class VectorClass, const bool FMA>
double PhyloTree::computeNonrevLikelihoodBranchGenericSIMD(PhyloNeighbor *dad_branch, PhyloNode *dad) {
#endif
//    assert(rooted);
    PhyloNode *node = (PhyloNode*) dad_branch->node;
    PhyloNeighbor *node_branch = (PhyloNeighbor*) node->findNeighbor(dad);
    if (!central_partial_lh)
        initializeAllPartialLh();
    // orient the branch so that "dad" is the tip/root side where required
    if (node->isLeaf() || (dad_branch->direction == AWAYFROM_ROOT && !isRootLeaf(dad))) {
        PhyloNode *tmp_node = dad; dad = node; node = tmp_node;
        PhyloNeighbor *tmp_nei = dad_branch; dad_branch = node_branch; node_branch = tmp_nei;
    }
#ifdef KERNEL_FIX_STATES
    computeTraversalInfo<VectorClass, nstates>(node, dad, false);
#else
    computeTraversalInfo<VectorClass>(node, dad, false);
#endif
    double tree_lh = 0.0;
#ifndef KERNEL_FIX_STATES
    size_t nstates = aln->num_states;
#endif
    size_t nstatesqr = nstates*nstates;
    size_t ncat = site_rate->getNRate();
    size_t ncat_mix = (model_factory->fused_mix_rate) ? ncat : ncat*model->getNMixtures();
    size_t denom = (model_factory->fused_mix_rate) ? 1 : ncat;
    size_t block = ncat_mix * nstates;
    size_t ptn; // for big data size > 4GB memory required
    size_t c, i;
    size_t orig_nptn = aln->size();
    // pattern count rounded up to a whole number of SIMD lanes
    size_t max_orig_nptn = ((orig_nptn+VectorClass::size()-1)/VectorClass::size())*VectorClass::size();
    size_t nptn = max_orig_nptn+model_factory->unobserved_ptns.size();
    bool isASC = model_factory->unobserved_ptns.size() > 0;

    // per-thread pattern ranges
    vector<size_t> limits;
    computeBounds<VectorClass>(num_threads, nptn, limits);

//    double *trans_mat = new double[block*nstates];
    double *trans_mat = buffer_partial_lh;
    double *buffer_partial_lh_ptr = buffer_partial_lh + block*nstates;
    // precompute the per-category transition matrices, pre-scaled by the
    // category/mixture weight (and by state frequencies if unrooted)
    for (c = 0; c < ncat_mix; c++) {
        size_t mycat = c%ncat;
        size_t m = c/denom;
        double len = site_rate->getRate(mycat) * dad_branch->length;
        double prop = site_rate->getProp(mycat) * model->getMixtureWeight(m);
        double *this_trans_mat = &trans_mat[c*nstatesqr];
        model->computeTransMatrix(len, this_trans_mat, m);
        for (i = 0; i < nstatesqr; i++)
            this_trans_mat[i] *= prop;
        if (!rooted) {
            // if unrooted tree, multiply with frequency
            double state_freq[nstates];
            model->getStateFrequency(state_freq, m);
            for (i = 0; i < nstates; i++) {
                for (size_t x = 0; x < nstates; x++)
                    this_trans_mat[x] *= state_freq[i];
                this_trans_mat += nstates;
            }
        }
    }

    VectorClass all_tree_lh(0.0);
    VectorClass all_prob_const(0.0);

    if (dad->isLeaf()) {
        // special treatment for TIP-INTERNAL NODE case
//        double *partial_lh_node = new double[(aln->STATE_UNKNOWN+1)*block];
        double *partial_lh_node = buffer_partial_lh_ptr;
        buffer_partial_lh_ptr += get_safe_upper_limit((aln->STATE_UNKNOWN+1)*block);
        if (isRootLeaf(dad)) {
            // root leaf contributes weighted state frequencies
            for (c = 0; c < ncat_mix; c++) {
                double *lh_node = partial_lh_node + c*nstates;
                size_t m = c/denom;
                model->getStateFrequency(lh_node, m);
                double prop = site_rate->getProp(c%ncat) * model->getMixtureWeight(m);
                for (i = 0; i < nstates; i++)
                    lh_node[i] *= prop;
            }
        } else {
            IntVector states_dad = aln->seq_states[dad->id];
            states_dad.push_back(aln->STATE_UNKNOWN);
            // precompute information from one tip
            for (IntVector::iterator it = states_dad.begin(); it != states_dad.end(); it++) {
                double *lh_node = partial_lh_node +(*it)*block;
                double *lh_tip = tip_partial_lh + (*it)*nstates;
                double *trans_mat_tmp = trans_mat;
                for (c = 0; c < ncat_mix; c++) {
                    for (i = 0; i < nstates; i++) {
                        lh_node[i] = 0.0;
                        for (size_t x = 0; x < nstates; x++)
                            lh_node[i] += trans_mat_tmp[x] * lh_tip[x];
                        trans_mat_tmp += nstates;
                    }
                    lh_node += nstates;
                }
            }
        }

        // now do the real computation
#ifdef _OPENMP
#pragma omp parallel for private(ptn, i, c) schedule(static,1) num_threads(num_threads)
#endif
        for (int thread_id = 0; thread_id < num_threads; thread_id++) {
            VectorClass vc_tree_lh(0.0), vc_prob_const(0.0);
            size_t ptn_lower = limits[thread_id];
            size_t ptn_upper = limits[thread_id+1];
            // first compute partial_lh
            for (vector<TraversalInfo>::iterator it = traversal_info.begin(); it != traversal_info.end(); it++)
                computePartialLikelihood(*it, ptn_lower, ptn_upper, thread_id);

            // reset memory for _pattern_lh_cat
//            memset(_pattern_lh_cat+ptn_lower*ncat_mix, 0, (ptn_upper-ptn_lower)*ncat_mix*sizeof(double));

            // per-thread scratch area for the transposed tip vectors
            double *vec_tip = buffer_partial_lh_ptr + block*VectorClass::size()*thread_id;

            for (ptn = ptn_lower; ptn < ptn_upper; ptn+=VectorClass::size()) {
                VectorClass lh_ptn;
                lh_ptn.load_a(&ptn_invar[ptn]);
                VectorClass *lh_cat = (VectorClass*)(_pattern_lh_cat + ptn*ncat_mix);
                VectorClass *partial_lh_dad = (VectorClass*)(dad_branch->partial_lh + ptn*block);
                VectorClass *lh_node = (VectorClass*)vec_tip;

                //load tip vector
                for (i = 0; i < VectorClass::size(); i++) {
                    double *lh_tip;
                    if (isRootLeaf(dad))
                        lh_tip = partial_lh_node;
                    else if (ptn+i < orig_nptn)
                        lh_tip = partial_lh_node + block*(aln->at(ptn+i))[dad->id];
                    else if (ptn+i < max_orig_nptn)
                        lh_tip = partial_lh_node + block*aln->STATE_UNKNOWN;
                    else if (ptn+i < nptn)
                        lh_tip = partial_lh_node + block*model_factory->unobserved_ptns[ptn+i-max_orig_nptn];
                    else
                        lh_tip = partial_lh_node + block*aln->STATE_UNKNOWN;
                    // interleave the tip vector across SIMD lanes
                    double *this_vec_tip = vec_tip+i;
                    for (c = 0; c < block; c++) {
                        *this_vec_tip = lh_tip[c];
                        this_vec_tip += VectorClass::size();
                    }
                }
                if (_pattern_lh_cat_state) {
                    // naively compute pattern_lh per category per state
                    VectorClass *lh_state = (VectorClass*)(_pattern_lh_cat_state + ptn*block);
                    for (c = 0; c < ncat_mix; c++) {
                        for (i=0; i < nstates; i++) {
                            lh_cat[c] += (lh_state[i] = lh_node[i]*partial_lh_dad[i]);
                        }
                        lh_node += nstates;
                        partial_lh_dad += nstates;
                        lh_state += nstates;
                        lh_ptn += lh_cat[c];
                    }
                } else {
                    for (c = 0; c < ncat_mix; c++) {
#ifdef KERNEL_FIX_STATES
                        dotProductVec<VectorClass, VectorClass, nstates, FMA>(lh_node, partial_lh_dad, lh_cat[c]);
#else
                        dotProductVec<VectorClass, VectorClass, FMA>(lh_node, partial_lh_dad, lh_cat[c], nstates);
#endif
                        lh_node += nstates;
                        partial_lh_dad += nstates;
                        lh_ptn += lh_cat[c];
                    }
                }
                // gather per-lane rescaling counts into a SIMD vector
                VectorClass vc_min_scale;
                double* vc_min_scale_ptr = (double*)&vc_min_scale;
                for (i = 0; i < VectorClass::size(); i++) {
                    vc_min_scale_ptr[i] = dad_branch->scale_num[ptn+i];
                }
                vc_min_scale *= LOG_SCALING_THRESHOLD;

//                lh_ptn = abs(lh_ptn);
//                assert(horizontal_and(lh_ptn > 0));
                if (ptn < orig_nptn) {
                    lh_ptn = log(lh_ptn) + vc_min_scale;
                    lh_ptn.store_a(&_pattern_lh[ptn]);
                    vc_tree_lh = mul_add(lh_ptn, VectorClass().load_a(&ptn_freq[ptn]), vc_tree_lh);
                } else {
                    // ascertainment bias correction
                    if (ptn+VectorClass::size() > nptn) {
                        // cutoff the last entries if going beyond
                        lh_ptn.cutoff(nptn-ptn);
                    }
                    // bugfix 2016-01-21, prob_const can be rescaled
                    if (horizontal_or(vc_min_scale != 0.0)) {
                        // some entries are rescaled
                        double *lh_ptn_dbl = (double*)&lh_ptn;
                        for (i = 0; i < VectorClass::size(); i++)
                            if (vc_min_scale_ptr[i] != 0.0)
                                lh_ptn_dbl[i] *= SCALING_THRESHOLD;
                    }
                    vc_prob_const += lh_ptn;
                }
            } // FOR ptn
#ifdef _OPENMP
#pragma omp critical
#endif
            {
                all_tree_lh += vc_tree_lh;
                if (isASC)
                    all_prob_const += vc_prob_const;
            }
        } // FOR thread_id
    } else {
        // both dad and node are internal nodes
#ifdef _OPENMP
#pragma omp parallel for private(ptn, i, c) schedule(static,1) num_threads(num_threads)
#endif
        for (int thread_id = 0; thread_id < num_threads; thread_id++) {
            VectorClass vc_tree_lh(0.0), vc_prob_const(0.0);
            size_t ptn_lower = limits[thread_id];
            size_t ptn_upper = limits[thread_id+1];
            // first compute partial_lh
            for (vector<TraversalInfo>::iterator it = traversal_info.begin(); it != traversal_info.end(); it++)
                computePartialLikelihood(*it, ptn_lower, ptn_upper, thread_id);

            // reset memory for _pattern_lh_cat
            memset(_pattern_lh_cat+ptn_lower*ncat_mix, 0, (ptn_upper-ptn_lower)*ncat_mix*sizeof(double));

            for (ptn = ptn_lower; ptn < ptn_upper; ptn+=VectorClass::size()) {
                VectorClass lh_ptn;
                lh_ptn.load_a(&ptn_invar[ptn]);
                VectorClass *lh_cat = (VectorClass*)(_pattern_lh_cat + ptn*ncat_mix);
                VectorClass *partial_lh_dad = (VectorClass*)(dad_branch->partial_lh + ptn*block);
                VectorClass *partial_lh_node = (VectorClass*)(node_branch->partial_lh + ptn*block);
                double *trans_mat_tmp = trans_mat;
                if (_pattern_lh_cat_state) {
                    // also store per-state likelihoods
                    VectorClass *lh_state = (VectorClass*)(_pattern_lh_cat_state + ptn*block);
                    for (c = 0; c < ncat_mix; c++) {
                        for (i = 0; i < nstates; i++) {
#ifdef KERNEL_FIX_STATES
                            dotProductVec<VectorClass, double, nstates, FMA>(trans_mat_tmp, partial_lh_node, lh_state[i]);
#else
                            dotProductVec<VectorClass, double, FMA>(trans_mat_tmp, partial_lh_node, lh_state[i], nstates);
#endif
                            lh_cat[c] += (lh_state[i] *= partial_lh_dad[i]);
                            trans_mat_tmp += nstates;
                        }
                        lh_ptn += lh_cat[c];
                        partial_lh_node += nstates;
                        partial_lh_dad += nstates;
                        lh_state += nstates;
                    }
                } else {
                    for (c = 0; c < ncat_mix; c++) {
                        for (i = 0; i < nstates; i++) {
                            VectorClass lh_state;
#ifdef KERNEL_FIX_STATES
                            dotProductVec<VectorClass, double, nstates, FMA>(trans_mat_tmp, partial_lh_node, lh_state);
#else
                            dotProductVec<VectorClass, double, FMA>(trans_mat_tmp, partial_lh_node, lh_state, nstates);
#endif
                            lh_cat[c] = mul_add(partial_lh_dad[i], lh_state, lh_cat[c]);
                            trans_mat_tmp += nstates;
                        }
                        lh_ptn += lh_cat[c];
                        partial_lh_node += nstates;
                        partial_lh_dad += nstates;
                    }
                }
                // combined rescaling count from both branch directions
                VectorClass vc_min_scale;
                double* vc_min_scale_ptr = (double*)&vc_min_scale;
                for (i = 0; i < VectorClass::size(); i++) {
                    vc_min_scale_ptr[i] = dad_branch->scale_num[ptn+i] + node_branch->scale_num[ptn+i];
                }
                vc_min_scale *= LOG_SCALING_THRESHOLD;

//                lh_ptn = abs(lh_ptn);
                ASSERT(horizontal_and(lh_ptn > 0));
                if (ptn < orig_nptn) {
                    lh_ptn = log(lh_ptn) + vc_min_scale;
                    lh_ptn.store_a(&_pattern_lh[ptn]);
                    vc_tree_lh = mul_add(lh_ptn, VectorClass().load_a(&ptn_freq[ptn]), vc_tree_lh);
                } else {
                    // ascertainment bias correction
                    if (ptn+VectorClass::size() > nptn) {
                        // cutoff the last entries if going beyond
                        lh_ptn.cutoff(nptn-ptn);
                    }
                    // bugfix 2016-01-21, prob_const can be rescaled
                    if (horizontal_or(vc_min_scale != 0.0)) {
                        // some entries are rescaled
                        double *lh_ptn_dbl = (double*)&lh_ptn;
                        for (i = 0; i < VectorClass::size(); i++)
                            if (vc_min_scale_ptr[i] != 0.0)
                                lh_ptn_dbl[i] *= SCALING_THRESHOLD;
                    }
                    vc_prob_const += lh_ptn;
                }
            } // FOR ptn
#ifdef _OPENMP
#pragma omp critical
#endif
            {
                all_tree_lh += vc_tree_lh;
                if (isASC)
                    all_prob_const += vc_prob_const;
            }
        } // FOR thread_id
    }

    tree_lh = horizontal_add(all_tree_lh);

    if (!std::isfinite(tree_lh)) {
        model->writeInfo(cout);
        site_rate->writeInfo(cout);
        ASSERT(0 && "Numerical underflow for non-rev lh-branch");
    }

    if (isASC) {
        // ascertainment bias correction
        double prob_const = horizontal_add(all_prob_const);
        if (prob_const >= 1.0 || prob_const < 0.0) {
            printTree(cout, WT_TAXON_ID + WT_BR_LEN + WT_NEWLINE);
            model->writeInfo(cout);
        }
        ASSERT(prob_const < 1.0 && prob_const >= 0.0);
        prob_const = log(1.0 - prob_const);
        for (ptn = 0; ptn < orig_nptn; ptn+=VectorClass::size())
            (VectorClass().load_a(&_pattern_lh[ptn])-prob_const).store_a(&_pattern_lh[ptn]);
        tree_lh -= aln->getNSite()*prob_const;
        ASSERT(std::isfinite(tree_lh));
    }

    return tree_lh;
}
CPSfield_utils.h
#ifndef CPS_FIELD_UTILS_H
#define CPS_FIELD_UTILS_H
CPS_START_NAMESPACE

// Element-wise comparison of two 5D fermion fields. Prints every
// site/flavor/spin-color entry whose fractional difference exceeds tol and,
// after a global max-reduction of the failure flag, aborts the whole job
// (exit) on any mismatch; otherwise announces success on rank 0.
inline void compareFermion(const CPSfermion5D<ComplexD> &A, const CPSfermion5D<ComplexD> &B, const std::string &descr = "Ferms", const double tol = 1e-9){
  double fail = 0.;
  for(int i=0;i<GJP.VolNodeSites()*GJP.SnodeSites();i++){
    // decode lexicographic site index into a 5D local coordinate
    int x[5]; int rem = i;
    for(int ii=0;ii<5;ii++){ x[ii] = rem % GJP.NodeSites(ii); rem /= GJP.NodeSites(ii); }
    for(int f=0;f<GJP.Gparity()+1;f++){
      for(int sc=0;sc<24;sc++){ // 24 real numbers per site (12 complex spin-color components)
	double vbfm = *((double*)A.site_ptr(i,f) + sc);
	double vgrid = *((double*)B.site_ptr(i,f) + sc);
	double diff_rat = fabs( 2.0 * ( vbfm - vgrid )/( vbfm + vgrid ) );
	double rat_grid_bfm = vbfm/vgrid;
	// treat exact/near-zero pairs as matching to avoid 0/0 noise
	if(vbfm == 0.0 && vgrid == 0.0){ diff_rat = 0.; rat_grid_bfm = 1.; }
	if( (vbfm == 0.0 && fabs(vgrid) < 1e-50) || (vgrid == 0.0 && fabs(vbfm) < 1e-50) ){ diff_rat = 0.; rat_grid_bfm = 1.; }
	if(diff_rat > tol){
	  printf("Fail: (%d,%d,%d,%d,%d; %d; %d) A %g B %g rat_A_B %g fracdiff %g\n",x[0],x[1],x[2],x[3],x[4],f,sc,vbfm,vgrid,rat_grid_bfm,diff_rat);
	  fail = 1.0;
	}//else printf("Pass: (%d,%d,%d,%d,%d; %d; %d) A %g B %g rat_A_B %g fracdiff %g\n",x[0],x[1],x[2],x[3],x[4],f,sc,vbfm,vgrid,rat_grid_bfm,diff_rat);
      }
    }
  }
  glb_max(&fail); // propagate failure flag across all ranks
  if(fail!=0.0){
    if(!UniqueID()){ printf("Failed %s check\n", descr.c_str()); fflush(stdout); }
    exit(-1);
  }else{
    if(!UniqueID()){ printf("Passed %s check\n", descr.c_str()); fflush(stdout); }
  }
}

// Element-wise comparison of two scalar-complex CPSfields (enabled only for
// non-SIMD complex site types). Same fail/abort protocol as compareFermion;
// with print_all set, matching entries are reported too (as "Pass").
template<typename FieldType, typename my_enable_if<_equal<typename ComplexClassify<typename FieldType::FieldSiteType>::type, complex_double_or_float_mark>::value,int>::type = 0>
inline void compareField(const FieldType &A, const FieldType &B, const std::string &descr = "Field", const double tol = 1e-9, bool print_all = false){
  typedef typename FieldType::FieldSiteType::value_type value_type;
  double fail = 0.;
  for(int xf=0;xf<A.nfsites();xf++){
    int f; int x[FieldType::FieldDimensionPolicy::EuclideanDimension];
    A.fsiteUnmap(xf, x,f);
    for(int i=0;i<FieldType::FieldSiteSize;i++){
      value_type const* av = (value_type const*)(A.fsite_ptr(xf)+i);
      value_type const* bv = (value_type const*)(B.fsite_ptr(xf)+i);
      for(int reim=0;reim<2;reim++){ // compare real and imaginary parts separately
	value_type diff_rat = (av[reim] == 0.0 && bv[reim] == 0.0) ? 0.0 : fabs( 2.*(av[reim]-bv[reim])/(av[reim]+bv[reim]) );
	if(diff_rat > tol || print_all){
	  if(!print_all) std::cout << "Fail: (";
	  else std::cout << "Pass: (";
	  for(int xx=0;xx<FieldType::FieldDimensionPolicy::EuclideanDimension-1;xx++)
	    std::cout << x[xx] << ", ";
	  std::cout << x[FieldType::FieldDimensionPolicy::EuclideanDimension-1];
	  std::cout << ") f=" << f << " reim " << reim << " A " << av[reim] << " B " << bv[reim] << " fracdiff " << diff_rat << std::endl;
	  if(!print_all) fail = 1.;
	}
      }
    }
  }
  glb_max(&fail);
  if(fail!=0.0){
    if(!UniqueID()){ printf("Failed %s check\n", descr.c_str()); fflush(stdout); }
    exit(-1);
  }else{
    if(!UniqueID()){ printf("Passed %s check\n", descr.c_str()); fflush(stdout); }
  }
}

#ifdef USE_BFM
// Export a single-checkerboard BFM fermion into a full CPS 5D fermion,
// zero-filling the opposite checkerboard. With singleprec_evec set, 'from' is
// read as single precision and promoted to double before import.
inline void exportBFMcb(CPSfermion5D<ComplexD> &into, Fermion_t from, bfm_evo<double> &dwf, int cb, bool singleprec_evec = false){
  Fermion_t zero_a = dwf.allocFermion();
#pragma omp parallel
  {
    dwf.set_zero(zero_a);
  }
  Fermion_t etmp = dwf.allocFermion();
  Fermion_t tmp[2];
  tmp[!cb] = zero_a; // unused checkerboard is explicitly zero
  if(singleprec_evec){
    const int len = 24 * dwf.node_cbvol * (1 + dwf.gparity) * dwf.cbLs;
#pragma omp parallel for
    for(int j = 0; j < len; j++) {
      ((double*)etmp)[j] = ((float*)(from))[j];
    }
    tmp[cb] = etmp;
  }else tmp[cb] = from;
  dwf.cps_impexFermion(into.ptr(),tmp,0);
  dwf.freeFermion(zero_a);
  dwf.freeFermion(etmp);
}
#endif

#ifdef USE_GRID
// Export a single-checkerboard Grid fermion into a full CPS 5D fermion by
// placing it on a zeroed full-grid temporary and importing through FgridFclass.
template<typename GridPolicies>
inline void exportGridcb(CPSfermion5D<ComplexD> &into, typename GridPolicies::GridFermionField &from, typename GridPolicies::FgridFclass &latg){
  Grid::GridCartesian *FGrid = latg.getFGrid();
  typename GridPolicies::GridFermionField tmp_g(FGrid);
  tmp_g = Grid::zero;
  setCheckerboard(tmp_g, from);
  latg.ImportFermion((Vector*)into.ptr(), tmp_g);
}
#endif

#ifdef USE_QMP
//Cyclic permutation of *4D* CPSfield with
// std::complex type and FourDpolicy dimension policy
//Conventions are direction of *data flow*: For shift n in direction +1 f'(x) = f(x-\hat i) so data is sent in the +x direction.
#define CONDITION _equal<typename ComplexClassify<mf_Complex>::type, complex_double_or_float_mark>::value && (_equal<DimensionPolicy,FourDpolicy>::value || _equal<DimensionPolicy,SpatialPolicy>::value)

// Multi-node (QMP) cyclic permutation for scalar-complex fields: gathers the
// boundary face into a send buffer, shifts the bulk on-node, then exchanges
// faces with the neighbor rank in direction 'dir' (sign 'pm', offset n sites).
template< typename mf_Complex, int SiteSize, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
void cyclicPermute(CPSfield<mf_Complex,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> &to, const CPSfield<mf_Complex,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> &from, const int dir, const int pm, const int n,
		   typename my_enable_if<CONDITION , const int>::type dummy = 0){
  enum {Dimension = DimensionPolicy::EuclideanDimension};
  assert(dir < Dimension);
  assert(n < GJP.NodeSites(dir));
  assert(pm == 1 || pm == -1);
  if(&to == &from){
    if(n==0) return;
    // in-place call: permute from a copy
    CPSfield<mf_Complex,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> tmpfrom(from);
    return cyclicPermute(to,tmpfrom,dir,pm,n);
  }
  if(n == 0){
    to = from; return;
  }
  QMP_barrier();
  //Prepare face to send. If we send in the + direction we need to collect the slice starting {L-n ... L-1} (inclusive), and if we send in the - dir we collect the slice {0... n-1}
  int bsites = n; //sites on boundary
  int bsizes[Dimension]; bsizes[dir] = n;
  int boff[Dimension]; boff[dir] = (pm == 1 ? GJP.NodeSites(dir)-n : 0);
  for(int i=0;i<Dimension;i++)
    if(i != dir){
      bsizes[i] = GJP.NodeSites(i);
      bsites *= bsizes[i];
      boff[i] = 0;
    }
  int flav_off = from.flav_offset();
  int nf = from.nflavors();
  int bufsz = bsites * SiteSize * nf;
  int halfbufsz = bufsz/2; // second flavor occupies the upper half of the buffer

  QMP_mem_t *recv_mem = QMP_allocate_memory(bufsz * sizeof(mf_Complex));
  mf_Complex *recv_buf = (mf_Complex *)QMP_get_memory_pointer(recv_mem);
  QMP_mem_t *send_mem = QMP_allocate_memory(bufsz * sizeof(mf_Complex));
  mf_Complex *send_buf = (mf_Complex *)QMP_get_memory_pointer(send_mem);

  // pack the boundary face
#pragma omp parallel for
  for(int i=0;i<bsites;i++){
    int rem = i;
    int coor[Dimension];
    for(int d=0;d<Dimension;d++){ coor[d] = rem % bsizes[d] + boff[d]; rem/=bsizes[d]; }
    mf_Complex const* site_ptr = from.site_ptr(coor);
    mf_Complex* bp = send_buf + i*SiteSize;
    memcpy(bp,site_ptr,SiteSize*sizeof(mf_Complex));
    if(nf == 2){
      site_ptr += flav_off;
      bp += halfbufsz;
      memcpy(bp,site_ptr,SiteSize*sizeof(mf_Complex));
    }
  }
  QMP_barrier();

  //Copy remaining sites from on-node data with shift
  int rsizes[Dimension]; rsizes[dir] = GJP.NodeSites(dir) - n;
  int rsites = GJP.NodeSites(dir) - n;
  //if we sent in the + direction we need to shift the remaining L-n sites {0...L-n-1} forwards by n to make way for a new slice at the left side
  //if we sent in the - direction we need to shift the remaining L-n sites {n ... L-1} backwards by n to make way for a new slice at the right side
  int roff[Dimension]; roff[dir] = (pm == 1 ? 0 : n);
  for(int i=0;i<Dimension;i++)
    if(i != dir){
      rsizes[i] = GJP.NodeSites(i);
      rsites *= rsizes[i];
      roff[i] = 0;
    }
#pragma omp parallel for
  for(int i=0;i<rsites;i++){
    int rem = i;
    int from_coor[Dimension];
    for(int d=0;d<Dimension;d++){ from_coor[d] = rem % rsizes[d] + roff[d]; rem/=rsizes[d]; }
    int to_coor[Dimension];
    memcpy(to_coor,from_coor,Dimension*sizeof(int));
    to_coor[dir] = (pm == +1 ? from_coor[dir] + n : from_coor[dir] - n);
    mf_Complex const* from_ptr = from.site_ptr(from_coor);
    mf_Complex * to_ptr = to.site_ptr(to_coor);
    memcpy(to_ptr,from_ptr,SiteSize*sizeof(mf_Complex));
    if(nf == 2){
      from_ptr += flav_off;
      to_ptr += flav_off;
      memcpy(to_ptr,from_ptr,SiteSize*sizeof(mf_Complex));
    }
  }

  //Send/receive
  QMP_msgmem_t send_msg = QMP_declare_msgmem(send_buf,bufsz * sizeof(mf_Complex));
  QMP_msgmem_t recv_msg = QMP_declare_msgmem(recv_buf,bufsz * sizeof(mf_Complex));
  QMP_msghandle_t send = QMP_declare_send_relative(send_msg, dir, pm, 0);
  QMP_msghandle_t recv = QMP_declare_receive_relative(recv_msg, dir, -pm, 0);
  QMP_start(recv);
  QMP_start(send);
  QMP_status_t send_status = QMP_wait(send);
  if (send_status != QMP_SUCCESS)
    QMP_error("Send failed in cyclicPermute: %s\n", QMP_error_string(send_status));
  QMP_status_t rcv_status = QMP_wait(recv);
  if (rcv_status != QMP_SUCCESS)
    QMP_error("Receive failed in PassDataT: %s\n", QMP_error_string(rcv_status));

  //Copy received face into position. For + shift the origin we copy into is the left-face {0..n-1}, for a - shift its the right-face {L-n .. L-1}
  boff[dir] = (pm == 1 ?
	       0 : GJP.NodeSites(dir)-n);
  // unpack the received face into the destination slice
#pragma omp parallel for
  for(int i=0;i<bsites;i++){
    int rem = i;
    int coor[Dimension];
    for(int d=0;d<Dimension;d++){ coor[d] = rem % bsizes[d] + boff[d]; rem/=bsizes[d]; }
    mf_Complex * site_ptr = to.site_ptr(coor);
    mf_Complex const* bp = recv_buf + i*SiteSize;
    memcpy(site_ptr,bp,SiteSize*sizeof(mf_Complex));
    if(nf == 2){
      site_ptr += flav_off;
      bp += halfbufsz;
      memcpy(site_ptr,bp,SiteSize*sizeof(mf_Complex));
    }
  }
  // release all QMP handles and buffers
  QMP_free_msghandle(send);
  QMP_free_msghandle(recv);
  QMP_free_msgmem(send_msg);
  QMP_free_msgmem(recv_msg);
  QMP_free_memory(send_mem);
  QMP_free_memory(recv_mem);
  QMP_barrier();
}
#undef CONDITION

# ifdef USE_GRID
#define CONDITION _equal<typename ComplexClassify<mf_Complex>::type, grid_vector_complex_mark>::value && (_equal<DimensionPolicy,FourDSIMDPolicy>::value || _equal<DimensionPolicy,ThreeDSIMDPolicy>::value)

//Version with SIMD vectorized data
// Multi-node (QMP) cyclic permutation for SIMD-packed fields: boundary sites
// are unpacked lane-by-lane into a scalar send buffer before the exchange.
template< typename mf_Complex, int SiteSize, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
void cyclicPermute(CPSfield<mf_Complex,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> &to, const CPSfield<mf_Complex,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> &from, const int dir, const int pm, const int n,
		   typename my_enable_if<CONDITION, const int>::type dummy = 0){
  enum {Dimension = DimensionPolicy::EuclideanDimension};
  assert(dir < Dimension);
  assert(n < GJP.NodeSites(dir));
  assert(pm == 1 || pm == -1);
  if(&to == &from){
    if(n==0) return;
    CPSfield<mf_Complex,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> tmpfrom(from);
    return cyclicPermute(to,tmpfrom,dir,pm,n);
  }
  if(n == 0){
    to = from; return;
  }
  const int nsimd = mf_Complex::Nsimd();

  //Use notation c (combined index), o (outer index) i (inner index)
  int bcsites = n; //sites on boundary
  int bcsizes[Dimension]; bcsizes[dir] = n;
  int bcoff[Dimension]; bcoff[dir] = (pm == 1 ? GJP.NodeSites(dir)-n : 0);
  int bcoff_postcomms[Dimension]; bcoff_postcomms[dir] = (pm == 1 ? 0 : GJP.NodeSites(dir)-n);
  for(int i=0;i<Dimension;i++)
    if(i != dir){
      bcsizes[i] = GJP.NodeSites(i);
      bcsites *= bcsizes[i];
      bcoff[i] = 0;
      bcoff_postcomms[i] = 0;
    }

  //Build table of points on face (both outer and inner index)
  int nf = from.nflavors();
  int flav_off = from.flav_offset();
  typedef typename Grid::GridTypeMapper<mf_Complex>::scalar_type scalarType;
  int bufsz = bcsites * SiteSize * nf;

  QMP_mem_t *recv_mem = QMP_allocate_memory(bufsz * sizeof(scalarType));
  scalarType *recv_buf = (scalarType *)QMP_get_memory_pointer(recv_mem);
  QMP_mem_t *send_mem = QMP_allocate_memory(bufsz * sizeof(scalarType));
  scalarType *send_buf = (scalarType *)QMP_get_memory_pointer(send_mem);

  int osites = from.nsites();
  std::vector<int> to_oi_buf_map(nf * osites * nsimd); //map from outer and inner index of destination site to offset within buffer, used *after* comms.
  //map i + nsimd*(o + osites*f) as index

  // pack the face: extract one SIMD lane per boundary site into the scalar buffer
#pragma omp parallel for
  for(int c=0;c<bcsites;c++){
    int rem = c;
    int coor[Dimension];
    for(int d=0;d<Dimension;d++){ coor[d] = rem % bcsizes[d]; rem/=bcsizes[d]; }
    int coor_dest[Dimension];
    for(int d=0;d<Dimension;d++){
      coor_dest[d] = coor[d] + bcoff_postcomms[d];
      coor[d] += bcoff[d];
    }
    int i = from.SIMDmap(coor);
    int o = from.siteMap(coor);
    int i_dest = from.SIMDmap(coor_dest);
    int o_dest = from.siteMap(coor_dest);

    Grid::Vector<scalarType> ounpacked(nsimd);
    for(int f=0;f<nf;f++){
      mf_Complex const *osite_ptr = from.site_ptr(o,f);
      int send_buf_off = (c + bcsites*f)*SiteSize;
      scalarType* bp = send_buf + send_buf_off;
      to_oi_buf_map[ i_dest + nsimd*(o_dest+osites*f) ] = send_buf_off;
      for(int s=0;s<SiteSize;s++){
	vstore(*(osite_ptr++), ounpacked.data());
	*(bp++) = ounpacked[i];
      }
    }
  }

  //Send/receive
  QMP_msgmem_t send_msg = QMP_declare_msgmem(send_buf,bufsz * sizeof(scalarType));
  QMP_msgmem_t recv_msg = QMP_declare_msgmem(recv_buf,bufsz * sizeof(scalarType));
  QMP_msghandle_t send = QMP_declare_send_relative(send_msg, dir, pm, 0);
  QMP_msghandle_t recv = QMP_declare_receive_relative(recv_msg, dir, -pm, 0);
  QMP_start(recv);
  QMP_start(send);
  QMP_status_t send_status = QMP_wait(send);
  if (send_status != QMP_SUCCESS)
    QMP_error("Send failed in cyclicPermute: %s\n", QMP_error_string(send_status));
  QMP_status_t rcv_status = QMP_wait(recv);
  if (rcv_status != QMP_SUCCESS)
    QMP_error("Receive failed in PassDataT: %s\n", QMP_error_string(rcv_status));

  //Copy remaining sites from on-node data with shift and pull in data from buffer simultaneously
  //if we sent in the + direction we need to shift the remaining L-n sites {0...L-n-1} forwards by n to make way for a new slice at the left side
  //if we sent in the - direction we need to shift the remaining L-n sites {n ... L-1} backwards by n to make way for a new slice at the right side
  //Problem is we don't want two threads writing to the same AVX register at the same time. Therefore we thread the loop over the destination SIMD vectors and work back
  std::vector< std::vector<int> > lane_offsets(nsimd, std::vector<int>(Dimension) );
  for(int i=0;i<nsimd;i++) from.SIMDunmap(i, lane_offsets[i].data() );

#pragma omp parallel for
  for(int oto = 0;oto < osites; oto++){
    int oto_base_coor[Dimension];
    to.siteUnmap(oto,oto_base_coor);

    //For each destination lane compute the source site index and lane
    int from_lane[nsimd];
    int from_osite_idx[nsimd]; //also use for recv_buf offsets for sites pulled over boundary
    for(int lane = 0; lane < nsimd; lane++){
      int offrom_coor[Dimension];
      for(int d=0;d<Dimension;d++) offrom_coor[d] = oto_base_coor[d] + lane_offsets[lane][d];
      offrom_coor[dir] += (pm == 1 ?
			   -n : n);
      if(offrom_coor[dir] < 0 || offrom_coor[dir] >= GJP.NodeSites(dir)){
	from_lane[lane] = -1; //indicates data is in recv_buf
	from_osite_idx[lane] = to_oi_buf_map[ lane + nsimd*oto ]; //here is for flavor 0 - remember to offset for second flav
      }else{
	from_lane[lane] = from.SIMDmap(offrom_coor);
	from_osite_idx[lane] = from.siteMap(offrom_coor);
      }
    }

    //Now loop over flavor and element within the site as well as SIMD lanes of the destination vector and gather what we need to poke - then poke it
    Grid::Vector<scalarType> towrite(nsimd);
    Grid::Vector<scalarType> unpack(nsimd);
    for(int f=0;f<nf;f++){
      for(int s=0;s<SiteSize;s++){
	for(int tolane=0;tolane<nsimd;tolane++){
	  if(from_lane[tolane] != -1){
	    // on-node source: unpack the SIMD word and pick the source lane
	    mf_Complex const* from_osite_ptr = from.site_ptr(from_osite_idx[tolane], f) + s;
	    vstore(*from_osite_ptr,unpack.data());
	    towrite[tolane] = unpack[ from_lane[tolane] ];
	  }else{
	    //data is in buffer
	    towrite[tolane] = recv_buf[ from_osite_idx[tolane] + s + f*bcsites*SiteSize ];
	  }
	}
	mf_Complex* to_osite_ptr = to.site_ptr(oto,f) + s;
	vset(*to_osite_ptr, towrite.data());
      }
    }
  }

  QMP_free_msghandle(send);
  QMP_free_msghandle(recv);
  QMP_free_msgmem(send_msg);
  QMP_free_msgmem(recv_msg);
  QMP_free_memory(send_mem);
  QMP_free_memory(recv_mem);
  QMP_barrier();
}
#undef CONDITION

# endif //ifdef USE_GRID

#else //ifdef USE_QMP

#define CONDITION _equal<typename ComplexClassify<mf_Complex>::type, complex_double_or_float_mark>::value && (_equal<DimensionPolicy,FourDpolicy>::value || _equal<DimensionPolicy,SpatialPolicy>::value)

// Single-node fallback (no QMP) for scalar-complex fields: the shift is a
// plain on-node modular index remap; aborts if more than one node is in use.
template< typename mf_Complex, int SiteSize, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
void cyclicPermute(CPSfield<mf_Complex,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> &to, const CPSfield<mf_Complex,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> &from, const int dir, const int pm, const int n,
		   typename my_enable_if<CONDITION , const int>::type dummy = 0){
  enum {Dimension = DimensionPolicy::EuclideanDimension};
  assert(dir < Dimension);
  assert(n < GJP.NodeSites(dir));
  assert(pm == 1 || pm == -1);
  if(&to == &from){
    if(n==0) return;
    CPSfield<mf_Complex,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> tmpfrom(from);
    return cyclicPermute(to,tmpfrom,dir,pm,n);
  }
  if(n == 0){
    to = from; return;
  }
  const int nodes = GJP.Xnodes()*GJP.Ynodes()*GJP.Znodes()*GJP.Tnodes()*GJP.Snodes();
  if(nodes != 1) ERR.General("","cyclicPermute","Parallel implementation requires QMP\n");

#pragma omp parallel for
  for(int i=0;i<from.nfsites();i++){
    int f; int x[Dimension];
    from.fsiteUnmap(i,x,f);
    // +5L keeps the argument of % non-negative for pm = -1
    x[dir] = (x[dir] + pm * n + 5*GJP.NodeSites(dir) ) % GJP.NodeSites(dir);
    const mf_Complex* from_ptr = from.fsite_ptr(i);
    mf_Complex* to_ptr = to.site_ptr(x,f);
    memcpy(to_ptr,from_ptr,SiteSize*sizeof(mf_Complex));
  }
}
#undef CONDITION

# ifdef USE_GRID
#define CONDITION _equal<typename ComplexClassify<mf_Complex>::type, grid_vector_complex_mark>::value && (_equal<DimensionPolicy,FourDSIMDPolicy>::value || _equal<DimensionPolicy,ThreeDSIMDPolicy>::value)

//Version with SIMD vectorized data
// Single-node fallback for SIMD-packed fields: gathers each destination SIMD
// word lane-by-lane from the shifted source coordinates.
template< typename mf_Complex, int SiteSize, typename DimensionPolicy, typename FlavorPolicy, typename AllocPolicy>
void cyclicPermute(CPSfield<mf_Complex,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> &to, const CPSfield<mf_Complex,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> &from, const int dir, const int pm, const int n,
		   typename my_enable_if<CONDITION, const int>::type dummy = 0){
  enum {Dimension = DimensionPolicy::EuclideanDimension};
  assert(dir < Dimension);
  assert(n < GJP.NodeSites(dir));
  assert(pm == 1 || pm == -1);
  if(&to == &from){
    if(n==0) return;
    CPSfield<mf_Complex,SiteSize,DimensionPolicy,FlavorPolicy,AllocPolicy> tmpfrom(from);
    return cyclicPermute(to,tmpfrom,dir,pm,n);
  }
  if(n == 0){
    to = from; return;
  }
  const int nodes = GJP.Xnodes()*GJP.Ynodes()*GJP.Znodes()*GJP.Tnodes()*GJP.Snodes();
  if(nodes != 1) ERR.General("","cyclicPermute","Parallel implementation requires QMP\n");

  const int nsimd = mf_Complex::Nsimd();
  typedef typename mf_Complex::scalar_type scalar_type;
  const int nthr = omp_get_max_threads();
  // per-thread aligned scratch for assembling one SIMD word
  scalar_type* tmp_store_thr[nthr];
  for(int i=0;i<nthr;i++) tmp_store_thr[i] = (scalar_type*)memalign(128,nsimd*sizeof(scalar_type));

#pragma omp parallel for
  for(int ofto=0;ofto<to.nfsites();ofto++){
    //loop over outer site index
    const int me = omp_get_thread_num();
    int f; int oxto[Dimension];
    to.fsiteUnmap(ofto,oxto,f);
    mf_Complex* to_base_ptr = to.fsite_ptr(ofto);

    scalar_type* tmp_store = tmp_store_thr[me];

    //indexed by destination lane
    mf_Complex const* from_base_ptrs[nsimd];
    int from_lane_idx[nsimd];

    for(int tolane = 0; tolane < nsimd; tolane++){
      int ixto_off[Dimension];
      to.SIMDunmap(tolane,ixto_off); //get offset of inner site on tolane
      int xfrom[Dimension];
      for(int d=0;d<Dimension;d++) xfrom[d] = oxto[d] + ixto_off[d]; //full coord corresponding to tolane + outer site
      xfrom[dir] = (xfrom[dir] - pm * n + 5*GJP.NodeSites(dir) ) % GJP.NodeSites(dir);
      from_base_ptrs[tolane] = from.site_ptr(xfrom,f);
      from_lane_idx[tolane] = from.SIMDmap(xfrom);
    }
    for(int s=0;s<SiteSize;s++){
      for(int tolane = 0; tolane < nsimd; tolane++)
	tmp_store[tolane] = *( (scalar_type*)(from_base_ptrs[tolane] + s) + from_lane_idx[tolane] ); //cast SIMD type to scalar type pointer
      vset(*(to_base_ptr + s), tmp_store);
    }
  }
  for(int i=0;i<nthr;i++) free(tmp_store_thr[i]);
}
#undef CONDITION

# endif //ifdef USE_GRID

#endif //ifdef USE_QMP

// Sign of a (nonzero) shift component; 0 maps to -1 but callers below only
// invoke this for nonzero shifts.
inline int getShiftSign(const int of){ return of > 0 ?
  +1 : -1; }

//Invoke multiple independent permutes to offset field by vector 'shift' assuming field is periodic
template<typename FieldType>
void shiftPeriodicField(FieldType &to, const FieldType &from, const std::vector<int> &shift){
  int nd = shift.size(); //assume ascending: x,y,z,t
  int nshift_dirs = 0;
  for(int i=0;i<nd;i++) if(shift[i]!=0) ++nshift_dirs;
  if(nshift_dirs == 0){
    // no shift at all: at most a straight copy
    if(&to != &from) to = from;
    return;
  }else if(nshift_dirs == 1){
    // single direction: one cyclicPermute straight into the output
    for(int d=0;d<nd;d++){
      if(shift[d] != 0){
	cyclicPermute(to,from,d,getShiftSign(shift[d]),abs(shift[d]) );
	return;
      }
    }
  }else{
    // multiple directions: ping-pong between two temporaries, writing the
    // final permute directly into 'to'
    FieldType tmp1 = from;
    FieldType tmp2 = from;
    FieldType * send = &tmp1;
    FieldType * recv = &tmp2;
    int shifts_done = 0;
    for(int d=0;d<nd;d++){
      if(shift[d] != 0){
	cyclicPermute(shifts_done < nshift_dirs-1 ? *recv : to,*send,d,getShiftSign(shift[d]),abs(shift[d]) );
	++shifts_done;
	if(shifts_done < nshift_dirs) std::swap(send,recv);
	else return;
      }
    }
  }
}

// Multi-dimensional FFT of a scalar-complex CPSfield: for each selected
// direction the field is gathered so that direction is node-local, FFTed,
// then scattered back into 'into'.
template<typename CPSfieldType>
void fft(CPSfieldType &into, const CPSfieldType &from, const bool* do_dirs, const bool inverse_transform = false,
	 typename my_enable_if<_equal<typename ComplexClassify<typename CPSfieldType::FieldSiteType>::type, complex_double_or_float_mark>::value, const int>::type = 0 ){
  typedef typename LocalToGlobalInOneDirMap<typename CPSfieldType::FieldDimensionPolicy>::type DimPolGlobalInOneDir;
  typedef CPSfieldGlobalInOneDir<typename CPSfieldType::FieldSiteType, CPSfieldType::FieldSiteSize, DimPolGlobalInOneDir, typename CPSfieldType::FieldFlavorPolicy, typename CPSfieldType::FieldAllocPolicy> CPSfieldTypeGlobalInOneDir;
  int dcount = 0;
  for(int mu=0;mu<CPSfieldType::FieldDimensionPolicy::EuclideanDimension;mu++)
    if(do_dirs[mu]){
      // first pass reads 'from'; subsequent passes read the partial result in 'into'
      CPSfieldTypeGlobalInOneDir tmp_dbl(mu);
      tmp_dbl.gather( dcount==0 ?
from : into ); tmp_dbl.fft(inverse_transform); tmp_dbl.scatter(into); dcount ++; } } #ifdef USE_GRID template<typename CPSfieldType> void fft(CPSfieldType &into, const CPSfieldType &from, const bool* do_dirs, const bool inverse_transform = false, typename my_enable_if<_equal<typename ComplexClassify<typename CPSfieldType::FieldSiteType>::type, grid_vector_complex_mark>::value, const int>::type = 0 ){ typedef typename Grid::GridTypeMapper<typename CPSfieldType::FieldSiteType>::scalar_type ScalarType; typedef typename CPSfieldType::FieldDimensionPolicy::EquivalentScalarPolicy ScalarDimPol; typedef CPSfield<ScalarType, CPSfieldType::FieldSiteSize, ScalarDimPol, typename CPSfieldType::FieldFlavorPolicy, StandardAllocPolicy> ScalarFieldType; NullObject null_obj; ScalarFieldType tmp_in(null_obj); ScalarFieldType tmp_out(null_obj); tmp_in.importField(from); fft(tmp_out, tmp_in, do_dirs, inverse_transform); tmp_out.exportField(into); } #endif template<typename CPSfieldType> void fft(CPSfieldType &fftme, const bool* do_dirs){ fft(fftme,fftme,do_dirs); } template<typename CPSfieldType> void fft_opt(CPSfieldType &into, const CPSfieldType &from, const bool* do_dirs, const bool inverse_transform = false, typename my_enable_if<_equal<typename ComplexClassify<typename CPSfieldType::FieldSiteType>::type, complex_double_or_float_mark>::value, const int>::type = 0 ){ #ifndef USE_MPI fft(into,from,do_dirs,inverse_transform); #else enum { Dimension = CPSfieldType::FieldDimensionPolicy::EuclideanDimension }; int ndirs_fft = 0; for(int i=0;i<Dimension;i++) if(do_dirs[i]) ++ndirs_fft; if(! ndirs_fft ) return; //Need info on the MPI node mapping assert(GJP.Snodes() == 1); std::vector<int> node_map; getMPIrankMap(node_map); CPSfieldType tmp(from.getDimPolParams()); //we want the last fft to end up in 'into'. Intermediate FFTs cycle between into and tmp as temp storage. 
Thus for odd ndirs_fft, the first fft should output to 'into', for even it should output to 'tmp' CPSfieldType *tmp1, *tmp2; if(ndirs_fft % 2 == 1){ tmp1 = &into; tmp2 = &tmp; }else{ tmp1 = &tmp; tmp2 = &into; } CPSfieldType* src = tmp2; CPSfieldType* out = tmp1; int fft_count = 0; for(int mu=0; mu<Dimension; mu++){ if(do_dirs[mu]){ CPSfieldType const *msrc = fft_count == 0 ? &from : src; fft_opt_mu(*out, *msrc, mu, node_map, inverse_transform); ++fft_count; std::swap(src,out); } } #endif } #ifdef USE_MPI template<typename CPSfieldType> void fft_opt_mu(CPSfieldType &into, const CPSfieldType &from, const int mu, const std::vector<int> &node_map, const bool inverse_transform, typename my_enable_if<_equal<typename ComplexClassify<typename CPSfieldType::FieldSiteType>::type, complex_double_or_float_mark>::value, const int>::type = 0 ){ enum {SiteSize = CPSfieldType::FieldSiteSize, Dimension = CPSfieldType::FieldDimensionPolicy::EuclideanDimension }; typedef typename CPSfieldType::FieldSiteType ComplexType; typedef typename ComplexType::value_type FloatType; typedef typename FFTWwrapper<FloatType>::complexType FFTComplex; const int nf = from.nflavors(); const int foff = from.flav_offset(); const int nthread = omp_get_max_threads(); //Eg for fft in X-direction, divide up Y,Z,T work over nodes in X-direction doing linear FFTs. 
const int munodesites = GJP.NodeSites(mu); const int munodes = GJP.Nodes(mu); const int mutotalsites = munodesites*munodes; const int munodecoor = GJP.NodeCoor(mu); const int n_orthdirs = Dimension - 1; FloatType Lmu(mutotalsites); int orthdirs[n_orthdirs]; //map of orthogonal directions to mu int total_work_munodes = 1; //sites orthogonal to FFT direction int o=0; for(int i=0;i< Dimension;i++) if(i!=mu){ total_work_munodes *= GJP.NodeSites(i); orthdirs[o++] = i; } //Divvy up work over othogonal directions int munodes_work[munodes]; int munodes_off[munodes]; for(int i=0;i<munodes;i++) thread_work(munodes_work[i],munodes_off[i], total_work_munodes, i, munodes); //use for node work instead :) //Get MPI ranks of nodes in mu direction int my_node_coor[4]; for(int i=0;i<4;i++) my_node_coor[i] = GJP.NodeCoor(i); int munodes_mpiranks[munodes]; for(int i=0;i<munodes;i++){ int munode_coor[4]; memcpy(munode_coor,my_node_coor,4*sizeof(int)); munode_coor[mu] = i; const int munode_lex = node_lex( munode_coor, 4 ); munodes_mpiranks[i] = node_map[munode_lex]; } //Gather send data ComplexType* send_bufs[munodes]; int send_buf_sizes[munodes]; for(int i=0;i<munodes;i++){ send_buf_sizes[i] = munodes_work[i] * munodesites * nf * SiteSize; send_bufs[i] = (ComplexType*)malloc( send_buf_sizes[i] * sizeof(ComplexType) ); for(int w = 0; w < munodes_work[i]; w++){ //index of orthogonal site within workload for i'th node in mu direction const int orthsite = munodes_off[i] + w; int coor_base[Dimension] = {0}; //Unmap orthsite into a base coordinate int rem = orthsite; for(int a=0;a<n_orthdirs;a++){ const int dir_a = orthdirs[a]; coor_base[dir_a] = rem % GJP.NodeSites(dir_a); rem /= GJP.NodeSites(dir_a); } for(int f=0;f<nf;f++){ for(int xmu=0;xmu<munodesites;xmu++){ ComplexType* to = send_bufs[i] + SiteSize * (w + munodes_work[i]*( f + nf*xmu ) ); //with musite changing slowest coor_base[mu] = xmu; ComplexType const* frm = from.site_ptr(coor_base,f); 
memcpy(to,frm,SiteSize*sizeof(ComplexType)); } } } } MPI_Request send_req[munodes]; MPI_Request recv_req[munodes]; MPI_Status status[munodes]; //Prepare recv buf const int bufsz = munodes_work[munodecoor] * mutotalsites * nf * SiteSize; //complete line in mu for each orthogonal coordinate ComplexType* recv_buf = (ComplexType*)malloc(bufsz * sizeof(ComplexType) ); //Setup send/receive for(int i=0;i<munodes;i++){ //works fine to send to all nodes, even if this involves a send to self. int sret = MPI_Isend(send_bufs[i], send_buf_sizes[i]*sizeof(ComplexType), MPI_CHAR, munodes_mpiranks[i], 0, MPI_COMM_WORLD, &send_req[i]); assert(sret == MPI_SUCCESS); int rret = MPI_Irecv(recv_buf + i*munodes_work[munodecoor]*nf*SiteSize*munodesites, send_buf_sizes[i]*sizeof(ComplexType), MPI_CHAR, munodes_mpiranks[i], MPI_ANY_TAG, MPI_COMM_WORLD, &recv_req[i]); assert(rret == MPI_SUCCESS); } int wret = MPI_Waitall(munodes,recv_req,status); assert(wret == MPI_SUCCESS); //Do FFT const int howmany = munodes_work[munodecoor] * nf * SiteSize; const int howmany_per_thread_base = howmany / nthread; //Divide work orthogonal to mu, 'howmany', over threads. Note, this may not divide howmany equally. The difference is made up by adding 1 unit of work to threads in ascending order until total work matches. Thus we need 2 plans: 1 for the base amount and one for the base+1 //if(!UniqueID()) printf("FFT work per site %d, divided over %d threads with %d work each. Remaining work %d allocated to ascending threads\n", howmany, nthread, howmany_per_thread_base, howmany - howmany_per_thread_base*nthread); int fft_phase = inverse_transform ? 
FFTW_BACKWARD : FFTW_FORWARD; static FFTplanContainer<FloatType> plan_f_base[Dimension]; //destructors deallocate plans static FFTplanContainer<FloatType> plan_f_base_p1[Dimension]; static int plan_howmany[Dimension]; static bool plan_init = false; static int plan_fft_phase; if(!plan_init || plan_howmany[mu] != howmany || fft_phase != plan_fft_phase){ if(!plan_init) for(int i=0;i<Dimension;i++) plan_howmany[i] = -1; typename FFTWwrapper<FloatType>::complexType *tmp_f; //I don't think it actually does anything with this plan_fft_phase = fft_phase; const int fft_work_per_musite = howmany_per_thread_base; const int musite_stride = howmany; //stride between musites plan_f_base[mu].setPlan(1, &mutotalsites, fft_work_per_musite, tmp_f, NULL, musite_stride, 1, tmp_f, NULL, musite_stride, 1, plan_fft_phase, FFTW_ESTIMATE); plan_f_base_p1[mu].setPlan(1, &mutotalsites, fft_work_per_musite+1, tmp_f, NULL, musite_stride, 1, tmp_f, NULL, musite_stride, 1, plan_fft_phase, FFTW_ESTIMATE); plan_init = true; //other mu's will still init later } FFTComplex*fftw_mem = (FFTComplex*)recv_buf; #pragma omp parallel { assert(nthread == omp_get_num_threads()); //plans will be messed up if not true const int me = omp_get_thread_num(); int thr_work, thr_off; thread_work(thr_work, thr_off, howmany, me, nthread); const FFTplanContainer<FloatType>* thr_plan_ptr; if(thr_work == howmany_per_thread_base) thr_plan_ptr = &plan_f_base[mu]; else if(thr_work == howmany_per_thread_base + 1) thr_plan_ptr = &plan_f_base_p1[mu]; else assert(0); //catch if logic for thr_work changes FFTWwrapper<FloatType>::execute_dft(thr_plan_ptr->getPlan(), fftw_mem + thr_off, fftw_mem + thr_off); } wret = MPI_Waitall(munodes,send_req,status); assert(wret == MPI_SUCCESS); //Send back out. 
Reuse the old send buffers as receive buffers and vice versa for(int i=0;i<munodes;i++){ //works fine to send to all nodes, even if this involves a send to self int sret = MPI_Isend(recv_buf + i*munodes_work[munodecoor]*nf*SiteSize*munodesites, send_buf_sizes[i]*sizeof(ComplexType), MPI_CHAR, munodes_mpiranks[i], 0, MPI_COMM_WORLD, &send_req[i]); assert(sret == MPI_SUCCESS); int rret = MPI_Irecv(send_bufs[i], send_buf_sizes[i]*sizeof(ComplexType), MPI_CHAR, munodes_mpiranks[i], MPI_ANY_TAG, MPI_COMM_WORLD, &recv_req[i]); assert(rret == MPI_SUCCESS); } wret = MPI_Waitall(munodes,recv_req,status); assert(wret == MPI_SUCCESS); //Poke into output for(int i=0;i<munodes;i++){ #pragma omp parallel for for(int w = 0; w < munodes_work[i]; w++){ //index of orthogonal site within workload for i'th node in mu direction const int orthsite = munodes_off[i] + w; int coor_base[Dimension] = {0}; //Unmap orthsite into a base coordinate int rem = orthsite; for(int a=0;a<n_orthdirs;a++){ int dir_a = orthdirs[a]; coor_base[dir_a] = rem % GJP.NodeSites(dir_a); rem /= GJP.NodeSites(dir_a); } for(int f=0;f<nf;f++){ for(int xmu=0;xmu<munodesites;xmu++){ coor_base[mu] = xmu; ComplexType* to = into.site_ptr(coor_base,f); ComplexType const* frm = send_bufs[i] + SiteSize * (w + munodes_work[i]*( f + nf*xmu ) ); if(!inverse_transform) memcpy(to,frm,SiteSize*sizeof(ComplexType)); else for(int s=0;s<SiteSize;s++) to[s] = frm[s]/Lmu; } } } } wret = MPI_Waitall(munodes,send_req,status); assert(wret == MPI_SUCCESS); free(recv_buf); for(int i=0;i<munodes;i++) free(send_bufs[i]); } #endif #ifdef USE_GRID template<typename CPSfieldType> void fft_opt(CPSfieldType &into, const CPSfieldType &from, const bool* do_dirs, const bool inverse_transform = false, typename my_enable_if<_equal<typename ComplexClassify<typename CPSfieldType::FieldSiteType>::type, grid_vector_complex_mark>::value, const int>::type = 0 ){ //we can avoid the copies below but with some effort - do at some point # ifdef USE_MPI 
fft(into,from,do_dirs,inverse_transform); # else typedef typename Grid::GridTypeMapper<typename CPSfieldType::FieldSiteType>::scalar_type ScalarType; typedef typename CPSfieldType::FieldDimensionPolicy::EquivalentScalarPolicy ScalarDimPol; typedef CPSfield<ScalarType, CPSfieldType::FieldSiteSize, ScalarDimPol, typename CPSfieldType::FieldFlavorPolicy, StandardAllocPolicy> ScalarFieldType; NullObject null_obj; ScalarFieldType tmp_in(null_obj); ScalarFieldType tmp_out(null_obj); tmp_in.importField(from); fft_opt(tmp_out, tmp_in, do_dirs, inverse_transform); tmp_out.exportField(into); # endif } #endif CPS_END_NAMESPACE #endif
task_priority.c
/* Fill the first N elements of a with 0.5.
 *
 * A single thread of the parallel team generates one OpenMP task per
 * element; tasks created later carry a higher scheduling priority hint.
 * The loop index is block-local, so each task captures its own copy
 * (firstprivate by default). */
void foo (float *a, int N)
{
#pragma omp parallel
#pragma omp single
  {
    for (int idx = 0; idx < N; ++idx) {
#pragma omp task priority(idx)
      a[idx] = 0.5f;
    }
  }
}
convolution_3x3_pack1ton.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_pack1ton_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { const int packn = csrr_vlenb() / 4; const word_type vl = vsetvl_e32m1(packn); int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out0 = top_blob.channel(p); vfloat32m1_t _bias0 = bias ? 
vle32_v_f32m1(bias + p * packn, vl) : vfmv_v_f_f32m1(0.f, vl); out0.fill(_bias0); const float* k0 = kernel.channel(p); int q = 0; for (; q < inch; q++) { float* outptr0 = out0; const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); vfloat32m1_t _k00 = vle32_v_f32m1(k0, vl); vfloat32m1_t _k01 = vle32_v_f32m1(k0 + packn, vl); vfloat32m1_t _k02 = vle32_v_f32m1(k0 + packn * 2, vl); vfloat32m1_t _k10 = vle32_v_f32m1(k0 + packn * 3, vl); vfloat32m1_t _k11 = vle32_v_f32m1(k0 + packn * 4, vl); vfloat32m1_t _k12 = vle32_v_f32m1(k0 + packn * 5, vl); vfloat32m1_t _k20 = vle32_v_f32m1(k0 + packn * 6, vl); vfloat32m1_t _k21 = vle32_v_f32m1(k0 + packn * 7, vl); vfloat32m1_t _k22 = vle32_v_f32m1(k0 + packn * 8, vl); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 7 < outw; j += 8) { vfloat32m1_t _sum0 = vle32_v_f32m1(outptr0, vl); vfloat32m1_t _sum1 = vle32_v_f32m1(outptr0 + packn, vl); vfloat32m1_t _sum2 = vle32_v_f32m1(outptr0 + packn * 2, vl); vfloat32m1_t _sum3 = vle32_v_f32m1(outptr0 + packn * 3, vl); vfloat32m1_t _sum4 = vle32_v_f32m1(outptr0 + packn * 4, vl); vfloat32m1_t _sum5 = vle32_v_f32m1(outptr0 + packn * 5, vl); vfloat32m1_t _sum6 = vle32_v_f32m1(outptr0 + packn * 6, vl); vfloat32m1_t _sum7 = vle32_v_f32m1(outptr0 + packn * 7, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[0], _k00, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[1], _k00, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[2], _k00, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[3], _k00, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r0[4], _k00, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r0[5], _k00, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r0[6], _k00, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r0[7], _k00, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[1], _k01, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[2], _k01, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[3], _k01, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[4], _k01, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r0[5], _k01, vl); _sum5 = 
vfmacc_vf_f32m1(_sum5, r0[6], _k01, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r0[7], _k01, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r0[8], _k01, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[2], _k02, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[3], _k02, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[4], _k02, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[5], _k02, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r0[6], _k02, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r0[7], _k02, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r0[8], _k02, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r0[9], _k02, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[0], _k10, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[1], _k10, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[2], _k10, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[3], _k10, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r1[4], _k10, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r1[5], _k10, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r1[6], _k10, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r1[7], _k10, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[1], _k11, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[2], _k11, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[3], _k11, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[4], _k11, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r1[5], _k11, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r1[6], _k11, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r1[7], _k11, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r1[8], _k11, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[2], _k12, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[3], _k12, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[4], _k12, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[5], _k12, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r1[6], _k12, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r1[7], _k12, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r1[8], _k12, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r1[9], _k12, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[0], _k20, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[1], _k20, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[2], _k20, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[3], _k20, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r2[4], _k20, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r2[5], _k20, 
vl); _sum6 = vfmacc_vf_f32m1(_sum6, r2[6], _k20, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r2[7], _k20, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[1], _k21, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[2], _k21, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[3], _k21, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[4], _k21, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r2[5], _k21, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r2[6], _k21, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r2[7], _k21, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r2[8], _k21, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[2], _k22, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[3], _k22, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[4], _k22, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[5], _k22, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r2[6], _k22, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r2[7], _k22, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r2[8], _k22, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r2[9], _k22, vl); vse32_v_f32m1(outptr0, _sum0, vl); vse32_v_f32m1(outptr0 + packn, _sum1, vl); vse32_v_f32m1(outptr0 + packn * 2, _sum2, vl); vse32_v_f32m1(outptr0 + packn * 3, _sum3, vl); vse32_v_f32m1(outptr0 + packn * 4, _sum4, vl); vse32_v_f32m1(outptr0 + packn * 5, _sum5, vl); vse32_v_f32m1(outptr0 + packn * 6, _sum6, vl); vse32_v_f32m1(outptr0 + packn * 7, _sum7, vl); outptr0 += packn * 8; r0 += 8; r1 += 8; r2 += 8; } for (; j + 3 < outw; j += 4) { vfloat32m1_t _sum0 = vle32_v_f32m1(outptr0, vl); vfloat32m1_t _sum1 = vle32_v_f32m1(outptr0 + packn, vl); vfloat32m1_t _sum2 = vle32_v_f32m1(outptr0 + packn * 2, vl); vfloat32m1_t _sum3 = vle32_v_f32m1(outptr0 + packn * 3, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[0], _k00, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[1], _k00, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[2], _k00, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[3], _k00, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[1], _k01, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[2], _k01, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[3], _k01, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[4], _k01, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[2], _k02, vl); 
_sum1 = vfmacc_vf_f32m1(_sum1, r0[3], _k02, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[4], _k02, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[5], _k02, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[0], _k10, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[1], _k10, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[2], _k10, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[3], _k10, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[1], _k11, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[2], _k11, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[3], _k11, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[4], _k11, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[2], _k12, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[3], _k12, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[4], _k12, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[5], _k12, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[0], _k20, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[1], _k20, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[2], _k20, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[3], _k20, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[1], _k21, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[2], _k21, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[3], _k21, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[4], _k21, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[2], _k22, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[3], _k22, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[4], _k22, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[5], _k22, vl); vse32_v_f32m1(outptr0, _sum0, vl); vse32_v_f32m1(outptr0 + packn, _sum1, vl); vse32_v_f32m1(outptr0 + packn * 2, _sum2, vl); vse32_v_f32m1(outptr0 + packn * 3, _sum3, vl); outptr0 += packn * 4; r0 += 4; r1 += 4; r2 += 4; } for (; j + 1 < outw; j += 2) { vfloat32m1_t _sum0 = vle32_v_f32m1(outptr0, vl); vfloat32m1_t _sum1 = vle32_v_f32m1(outptr0 + packn, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[0], _k00, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[1], _k00, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[1], _k01, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[2], _k01, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[2], _k02, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[3], _k02, vl); _sum0 = 
vfmacc_vf_f32m1(_sum0, r1[0], _k10, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[1], _k10, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[1], _k11, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[2], _k11, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[2], _k12, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[3], _k12, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[0], _k20, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[1], _k20, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[1], _k21, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[2], _k21, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[2], _k22, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[3], _k22, vl); vse32_v_f32m1(outptr0, _sum0, vl); vse32_v_f32m1(outptr0 + packn, _sum1, vl); outptr0 += packn * 2; r0 += 2; r1 += 2; r2 += 2; } for (; j < outw; j++) { vfloat32m1_t _sum0 = vle32_v_f32m1(outptr0, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[0], _k00, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[1], _k01, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[2], _k02, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[0], _k10, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[1], _k11, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[2], _k12, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[0], _k20, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[1], _k21, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[2], _k22, vl); vse32_v_f32m1(outptr0, _sum0, vl); outptr0 += packn; r0 += 1; r1 += 1; r2 += 1; } r0 += 2; r1 += 2; r2 += 2; } k0 += 9 * packn; } } } static void conv3x3s2_pack1ton_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { const int packn = csrr_vlenb() / 4; const word_type vl = vsetvl_e32m1(packn); int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out0 = top_blob.channel(p); vfloat32m1_t _bias0 = bias ? 
vle32_v_f32m1(bias + p * packn, vl) : vfmv_v_f_f32m1(0.f, vl); out0.fill(_bias0); const float* k0 = kernel.channel(p); int q = 0; for (; q < inch; q++) { float* outptr0 = out0; const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); vfloat32m1_t _k00 = vle32_v_f32m1(k0, vl); vfloat32m1_t _k01 = vle32_v_f32m1(k0 + packn, vl); vfloat32m1_t _k02 = vle32_v_f32m1(k0 + packn * 2, vl); vfloat32m1_t _k10 = vle32_v_f32m1(k0 + packn * 3, vl); vfloat32m1_t _k11 = vle32_v_f32m1(k0 + packn * 4, vl); vfloat32m1_t _k12 = vle32_v_f32m1(k0 + packn * 5, vl); vfloat32m1_t _k20 = vle32_v_f32m1(k0 + packn * 6, vl); vfloat32m1_t _k21 = vle32_v_f32m1(k0 + packn * 7, vl); vfloat32m1_t _k22 = vle32_v_f32m1(k0 + packn * 8, vl); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 7 < outw; j += 8) { vfloat32m1_t _sum0 = vle32_v_f32m1(outptr0, vl); vfloat32m1_t _sum1 = vle32_v_f32m1(outptr0 + packn, vl); vfloat32m1_t _sum2 = vle32_v_f32m1(outptr0 + packn * 2, vl); vfloat32m1_t _sum3 = vle32_v_f32m1(outptr0 + packn * 3, vl); vfloat32m1_t _sum4 = vle32_v_f32m1(outptr0 + packn * 4, vl); vfloat32m1_t _sum5 = vle32_v_f32m1(outptr0 + packn * 5, vl); vfloat32m1_t _sum6 = vle32_v_f32m1(outptr0 + packn * 6, vl); vfloat32m1_t _sum7 = vle32_v_f32m1(outptr0 + packn * 7, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[0], _k00, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[2], _k00, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[4], _k00, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[6], _k00, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r0[8], _k00, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r0[10], _k00, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r0[12], _k00, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r0[14], _k00, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[1], _k01, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[3], _k01, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[5], _k01, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[7], _k01, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r0[9], _k01, vl); _sum5 = 
vfmacc_vf_f32m1(_sum5, r0[11], _k01, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r0[13], _k01, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r0[15], _k01, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[2], _k02, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[4], _k02, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[6], _k02, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[8], _k02, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r0[10], _k02, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r0[12], _k02, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r0[14], _k02, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r0[16], _k02, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[0], _k10, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[2], _k10, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[4], _k10, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[6], _k10, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r1[8], _k10, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r1[10], _k10, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r1[12], _k10, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r1[14], _k10, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[1], _k11, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[3], _k11, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[5], _k11, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[7], _k11, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r1[9], _k11, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r1[11], _k11, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r1[13], _k11, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r1[15], _k11, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[2], _k12, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[4], _k12, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[6], _k12, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[8], _k12, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r1[10], _k12, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r1[12], _k12, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r1[14], _k12, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r1[16], _k12, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[0], _k20, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[2], _k20, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[4], _k20, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[6], _k20, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r2[8], _k20, vl); _sum5 = vfmacc_vf_f32m1(_sum5, 
r2[10], _k20, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r2[12], _k20, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r2[14], _k20, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[1], _k21, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[3], _k21, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[5], _k21, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[7], _k21, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r2[9], _k21, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r2[11], _k21, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r2[13], _k21, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r2[15], _k21, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[2], _k22, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[4], _k22, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[6], _k22, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[8], _k22, vl); _sum4 = vfmacc_vf_f32m1(_sum4, r2[10], _k22, vl); _sum5 = vfmacc_vf_f32m1(_sum5, r2[12], _k22, vl); _sum6 = vfmacc_vf_f32m1(_sum6, r2[14], _k22, vl); _sum7 = vfmacc_vf_f32m1(_sum7, r2[16], _k22, vl); vse32_v_f32m1(outptr0, _sum0, vl); vse32_v_f32m1(outptr0 + packn, _sum1, vl); vse32_v_f32m1(outptr0 + packn * 2, _sum2, vl); vse32_v_f32m1(outptr0 + packn * 3, _sum3, vl); vse32_v_f32m1(outptr0 + packn * 4, _sum4, vl); vse32_v_f32m1(outptr0 + packn * 5, _sum5, vl); vse32_v_f32m1(outptr0 + packn * 6, _sum6, vl); vse32_v_f32m1(outptr0 + packn * 7, _sum7, vl); outptr0 += packn * 8; r0 += 16; r1 += 16; r2 += 16; } for (; j + 3 < outw; j += 4) { vfloat32m1_t _sum0 = vle32_v_f32m1(outptr0, vl); vfloat32m1_t _sum1 = vle32_v_f32m1(outptr0 + packn, vl); vfloat32m1_t _sum2 = vle32_v_f32m1(outptr0 + packn * 2, vl); vfloat32m1_t _sum3 = vle32_v_f32m1(outptr0 + packn * 3, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[0], _k00, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[2], _k00, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[4], _k00, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[6], _k00, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[1], _k01, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[3], _k01, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[5], _k01, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[7], _k01, vl); _sum0 = 
vfmacc_vf_f32m1(_sum0, r0[2], _k02, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[4], _k02, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r0[6], _k02, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r0[8], _k02, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[0], _k10, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[2], _k10, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[4], _k10, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[6], _k10, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[1], _k11, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[3], _k11, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[5], _k11, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[7], _k11, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[2], _k12, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[4], _k12, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r1[6], _k12, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r1[8], _k12, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[0], _k20, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[2], _k20, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[4], _k20, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[6], _k20, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[1], _k21, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[3], _k21, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[5], _k21, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[7], _k21, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[2], _k22, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[4], _k22, vl); _sum2 = vfmacc_vf_f32m1(_sum2, r2[6], _k22, vl); _sum3 = vfmacc_vf_f32m1(_sum3, r2[8], _k22, vl); vse32_v_f32m1(outptr0, _sum0, vl); vse32_v_f32m1(outptr0 + packn, _sum1, vl); vse32_v_f32m1(outptr0 + packn * 2, _sum2, vl); vse32_v_f32m1(outptr0 + packn * 3, _sum3, vl); outptr0 += packn * 4; r0 += 8; r1 += 8; r2 += 8; } for (; j + 1 < outw; j += 2) { vfloat32m1_t _sum0 = vle32_v_f32m1(outptr0, vl); vfloat32m1_t _sum1 = vle32_v_f32m1(outptr0 + packn, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[0], _k00, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[2], _k00, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[1], _k01, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r0[3], _k01, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[2], _k02, vl); _sum1 = vfmacc_vf_f32m1(_sum1, 
r0[4], _k02, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[0], _k10, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[2], _k10, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[1], _k11, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[3], _k11, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[2], _k12, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r1[4], _k12, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[0], _k20, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[2], _k20, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[1], _k21, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[3], _k21, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[2], _k22, vl); _sum1 = vfmacc_vf_f32m1(_sum1, r2[4], _k22, vl); vse32_v_f32m1(outptr0, _sum0, vl); vse32_v_f32m1(outptr0 + packn, _sum1, vl); outptr0 += packn * 2; r0 += 4; r1 += 4; r2 += 4; } for (; j < outw; j++) { vfloat32m1_t _sum0 = vle32_v_f32m1(outptr0, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[0], _k00, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[1], _k01, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r0[2], _k02, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[0], _k10, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[1], _k11, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r1[2], _k12, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[0], _k20, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[1], _k21, vl); _sum0 = vfmacc_vf_f32m1(_sum0, r2[2], _k22, vl); vse32_v_f32m1(outptr0, _sum0, vl); outptr0 += packn; r0 += 2; r1 += 2; r2 += 2; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } k0 += 9 * packn; } } }
fast_gaussian_blur_template.h
#pragma once #include <type_traits> #include <algorithm> #include <cmath> template<typename T, int C> void horizontal_blur_template(T * in, T * out, const int w, const int h, const int r) { float iarr = 1.f / (r+r+1); #pragma omp parallel for for(int i=0; i<h; i++) { int ti = i*w; int li = ti; int ri = ti+r; std::conditional_t<std::is_integral<T>::value, int, float> fv[C], lv[C], val[C]; for(int ch = 0; ch < C; ++ch) { fv[ch] = in[ti*C+ch]; lv[ch] = in[(ti+w-1)*C+ch]; val[ch] = (r+1)*fv[ch]; } for(int j=0; j<r; j++) for(int ch = 0; ch < C; ++ch) { val[ch] += in[(ti+j)*C+ch]; } for(int j=0; j<=r; j++, ri++, ti++) for(int ch = 0; ch < C; ++ch) { val[ch] += in[ri*C+ch] - fv[ch]; if( std::is_integral<T>::value ) out[ti*C+ch] = val[ch]*iarr+0.5f; // avoid std::round by adding 0.5f and cast to integer type else out[ti*C+ch] = val[ch]*iarr; } for(int j=r+1; j<w-r; j++, ri++, ti++, li++) for(int ch = 0; ch < C; ++ch) { val[ch] += in[ri*C+ch] - in[li*C+ch]; if( std::is_integral<T>::value ) out[ti*C+ch] = val[ch]*iarr+0.5f; // avoid std::round by adding 0.5f and cast to integer type else out[ti*C+ch] = val[ch]*iarr; } for(int j=w-r; j<w; j++, ti++, li++) for(int ch = 0; ch < C; ++ch) { val[ch] += lv[ch] - in[li*C+ch]; if( std::is_integral<T>::value ) out[ti*C+ch] = val[ch]*iarr+0.5f; // avoid std::round by adding 0.5f and cast to integer type else out[ti*C+ch] = val[ch]*iarr; } } } template<typename T> void horizontal_blur_template_dispatch(T * in, T * out, const int w, const int h, const int c, const int r) { switch(c) { case 1: horizontal_blur_template<T,1>(in, out, w, h, r); break; case 2: horizontal_blur_template<T,2>(in, out, w, h, r); break; case 3: horizontal_blur_template<T,3>(in, out, w, h, r); break; case 4: horizontal_blur_template<T,4>(in, out, w, h, r); break; default: printf("%d channels is not supported yet. 
Add a specific case if possible or fall back to the generic version.", c); break; } } template<typename T, int C> void flip_block_template(T * in, T * out, const int w, const int h) { constexpr int block = 256; #pragma omp parallel for collapse(2) for(int x= 0; x < w; x+= block) { for(int y= 0; y < h; y+= block) { T * p = in + y*w*C + x*C; T * q = out + y*C + x*h*C; const int blockx= std::min(w, x+block) - x; const int blocky= std::min(h, y+block) - y; for(int xx= 0; xx < blockx; xx++) { for(int yy= 0; yy < blocky; yy++) { for(int k= 0; k < C; k++) q[k]= p[k]; //~ std::memcpy(q, p, C); p+= w*C; q+= C; } // repositionne les pointeurs sur le prochain pixel p+= -blocky*w*C + C; q+= -blocky*C + h*C; } } } } template<typename T> void flip_block_template_dispatch(T * in, T * out, const int w, const int h, const int c) { switch(c) { case 1: flip_block_template<T,1>(in, out, w, h); break; case 2: flip_block_template<T,2>(in, out, w, h); break; case 3: flip_block_template<T,3>(in, out, w, h); break; case 4: flip_block_template<T,4>(in, out, w, h); break; default: printf("%d channels is not supported yet. Add a specific case if possible or fall back to the generic version.", c); break; } } template<typename T> void fast_gaussian_blur_template(T *& in, T *& out, int w, int h, int c, float sigma) { int n = 3; int boxes[3]; sigma_to_box_radius(boxes, sigma, n); horizontal_blur_template_dispatch(in, out, w, h, c, boxes[0]); horizontal_blur_template_dispatch(out, in, w, h, c, boxes[1]); horizontal_blur_template_dispatch(in, out, w, h, c, boxes[2]); flip_block_template_dispatch(out, in, w, h, c); horizontal_blur_template_dispatch(in, out, h, w, c, boxes[0]); horizontal_blur_template_dispatch(out, in, h, w, c, boxes[1]); horizontal_blur_template_dispatch(in, out, h, w, c, boxes[2]); flip_block_template_dispatch(out, in, h, w, c); std::swap(in, out); }
test.c
// Regression tests for the OpenMP "parallel" construct on offloaded targets:
// thread ids, num_threads, the if and proc_bind clauses, and the data-sharing
// clauses (shared/private/firstprivate) for scalars, arrays, and structs.
// TEST/VERIFY/INIT_LOOP/ZERO_ARRAY/check_offloading come from the
// ../utilities headers; VERIFY(lb, ub, expr, expected) presumably checks
// expr for each i in [lb, ub) after every trial -- see utilities.h.
#include <stdio.h>
#include <omp.h>
#pragma omp requires unified_shared_memory
#include "../utilities/check.h"
#include "../utilities/utilities.h"

// Number of times each TEST body runs (trial = 0 .. TRIALS-1 inside VERIFY).
#define TRIALS (1)

// Length of every test array.
#define N (992)

// C[i] = 1, D[i] = i, E[i] = -i, so D[i] + E[i] == 0 for every i.
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})

#define ZERO(X) ZERO_ARRAY(N, X)

int main(void) {
  check_offloading();

  double A[N], B[N], C[N], D[N], E[N];
  INIT();

  //
  // Test: omp_get_thread_num()
  //
  ZERO(A);
  TEST({
    // Master in the serial section has thread id 0.
    _Pragma("omp parallel master")
    {
      int tid = omp_get_thread_num();
      A[tid] += tid;
    }
    // Expecting to start 128 parallel threads.
    _Pragma("omp parallel num_threads(128)")
    {
      // Workers in parallel section have thread ids 0 ... 127
      int tid = omp_get_thread_num();
      A[tid] += tid;
    }
  }, VERIFY(0, 128, A[i], i*(trial+1)));

  //
  // Test: Execute parallel on device
  //
  TEST({
    _Pragma("omp parallel num_threads(128)")
    {
      // Each thread fills 4 consecutive elements; D[j] + E[j] == 0 by INIT().
      int i = omp_get_thread_num()*4;
      for (int j = i; j < i + 4; j++) {
        B[j] = D[j] + E[j];
      }
    }
  }, VERIFY(0, 512, B[i], (double)0));

  //
  // Test: if clause serial execution of parallel region on target
  //
  ZERO(A);
  TEST({
    // if(0) serializes the region: only thread 0 runs, so A stays all-zero.
    _Pragma("omp parallel num_threads(128) if(0)")
    {
      int tid = omp_get_thread_num();
      A[tid] = tid;
    }
  }, VERIFY(0, 128, A[i], 0));

  //
  // Test: if clause serial execution of parallel region on target
  //
  ZERO(A);
  TEST({
    // A[1] == 0 at this point, so the condition is false and the region
    // serializes.
    _Pragma("omp parallel num_threads(128) if(A[1] == 1)")
    {
      int tid = omp_get_thread_num();
      A[tid] = tid;
    }
  }, VERIFY(0, 128, A[i], 0));

  //
  // Test: if clause parallel execution of parallel region on target
  //
  ZERO(A);
  TEST({
    // A[1] == 0 at this point, so the condition is true and the region runs
    // with 128 threads.
    _Pragma("omp parallel num_threads(128) if(A[1] == 0)")
    {
      int tid = omp_get_thread_num();
      A[tid] = tid;
    }
  }, VERIFY(0, 128, A[i], i));

  //
  // Test: proc_bind clause
  //
  TEST({
    // Each region contributes 1 + D[j] + E[j] == 1, so B[j] ends at 3.
    _Pragma("omp parallel num_threads(128) proc_bind(master)")
    {
      int i = omp_get_thread_num()*4;
      for (int j = i; j < i + 4; j++) {
        B[j] = 1 + D[j] + E[j];
      }
    }
    _Pragma("omp parallel num_threads(128) proc_bind(close)")
    {
      int i = omp_get_thread_num()*4;
      for (int j = i; j < i + 4; j++) {
        B[j] += 1 + D[j] + E[j];
      }
    }
    _Pragma("omp parallel num_threads(128) proc_bind(spread)")
    {
      int i = omp_get_thread_num()*4;
      for (int j = i; j < i + 4; j++) {
        B[j] += 1 + D[j] + E[j];
      }
    }
  }, VERIFY(0, 512, B[i], 3));

  //
  // Test: num_threads on parallel.
  // We assume a maximum of 128 threads in the parallel region (32 are
  // reserved for the master warp).
  //
  // This test fails on Volta because a parallel region can only contain
  // <=32 or a multiple of 32 workers.
  // t takes the values 1, 32, 64, 96, 128.
  for (int t = 1; t <= 128; t += t < 32 ? 31 : 32) {
    ZERO(A);
    int threads[1];
    threads[0] = t;
    TEST({
      _Pragma("omp parallel num_threads(threads[0]) if(1)")
      {
        int tid = omp_get_thread_num();
        A[tid] = 99;
      }
    }, VERIFY(0, 128, A[i], 99*(i < t)));
  }

  //
  // Test: sharing of variables from master to parallel region.
  // FIXME: Currently we don't have support to share variables from
  // master to workers, so we're doing "serialized" parallel execution.
  //
  ZERO(A);
  TEST({
    // tmp is shared by default: 1 (init) + 2 (inner) + 2 (after) == 5.
    double tmp = 1;
    A[0] = tmp;
    _Pragma("omp parallel if(0)")
    {
      tmp = 2;
      A[0] += tmp;
    }
    A[0] += tmp;
  }, VERIFY(0, 1, A[i], 5));

  //
  // Test: private clause on parallel region.
  // FIXME: Currently we don't have support to share variables from
  // master to workers, so we're doing "serialized" parallel execution.
  //
  ZERO(A);
  TEST({
    // The private copy of tmp is discarded, so the outer tmp stays 1:
    // 1 + 2 + 1 == 4.
    double tmp = 1;
    A[0] = tmp;
    _Pragma("omp parallel private(tmp) if(0)")
    {
      tmp = 2;
      A[0] += tmp;
    }
    A[0] += tmp;
  }, VERIFY(0, 1, A[i], 4));

  //
  // Test: firstprivate clause on parallel region.
  // FIXME: Currently we don't have support to share variables from
  // master to workers, so we're doing "serialized" parallel execution.
  //
  ZERO(A);
  TEST({
    // The firstprivate copy starts at 1 and becomes 3 inside; the outer
    // tmp stays 1: 1 + 3 + 1 == 5.
    double tmp = 1;
    A[0] = tmp;
    _Pragma("omp parallel firstprivate(tmp) if(0)")
    {
      tmp += 2;
      A[0] += tmp;
    }
    A[0] += tmp;
  }, VERIFY(0, 1, A[i], 5));

  //
  // Test: shared clause on parallel region.
  // FIXME: Currently we don't have support to share variables from
  // master to workers, so we're doing "serialized" parallel execution.
  //
  ZERO(A);
  TEST({
    // distance is shared (30 after the region); tmp is firstprivate:
    // inner A[0] = 1 + (1+2+30) = 34, then 34 + 1 + 30 == 65.
    double tmp = 1;
    A[0] = tmp;
    double distance = 21;
    _Pragma("omp parallel firstprivate(tmp) shared(distance) if(0)")
    {
      distance += 9;
      tmp += 2 + distance;
      A[0] += tmp;
    }
    A[0] += tmp + distance;
  }, VERIFY(0, 1, A[i], 65));

  //
  // Test: sharing of array from master to parallel region.
  //
  ZERO(A);
  ZERO(B);
  TEST({
    for (int i = 0; i < 128; i++) {
      B[i] = 0;
      A[i] = 99 + B[i];
      B[i] = 1;
    }
    _Pragma("omp parallel num_threads(128)")
    {
      int tid = omp_get_thread_num();
      A[tid] += 1;
      B[tid] += 2;
    }
    // A[i] = 99 + 1 + (1 + 2) == 103.
    for (int i = 0; i < 128; i++) {
      A[i] += B[i];
    }
  }, VERIFY(0, 128, A[i], 103));

  //
  // Test: array private clause on parallel region.
  //
  ZERO(A);
  ZERO(B);
  TEST({
    for (int i = 0; i < 128; i++) {
      B[i] = 0;
      A[i] = 99 + B[i];
      B[i] = 1;
    }
    // B is private: the write to B[tid] inside is discarded.
    _Pragma("omp parallel num_threads(128) private(B) if(1)")
    {
      int tid = omp_get_thread_num();
      A[tid] += 1;
      B[tid] = 2;
    }
    // A[i] = 99 + 1 + 1 == 101 (B still holds its original 1).
    for (int i = 0; i < 128; i++) {
      A[i] += B[i];
    }
  }, VERIFY(0, 128, A[i], 101));

  //
  // Test: array firstprivate clause on parallel region.
  //
  ZERO(A);
  ZERO(B);
  TEST({
    for (int i = 0; i < 128; i++) {
      B[i] = 0;
      A[i] = 99 + B[i];
      B[i] = 2;
    }
    // The firstprivate copy of B starts at 2, becomes 10 inside:
    // A[tid] = 99 + 10; the outer B keeps 2.
    _Pragma("omp parallel num_threads(128) firstprivate(B)")
    {
      int tid = omp_get_thread_num();
      B[tid] += 8;
      A[tid] += B[tid];
    }
    // A[i] = 109 + 2 == 111.
    for (int i = 0; i < 128; i++) {
      A[i] += B[i];
    }
  }, VERIFY(0, 128, A[i], 111));

  //
  // Test: array shared clause on parallel region.
  // FIXME: Currently we don't have support to share variables from
  // master to workers, so we're doing "serialized" parallel execution.
  //
  ZERO(A);
  ZERO(B);
  TEST({
    B[0] = 0;
    A[0] = 99 + B[0];
    B[0] = 2;
    double distance[32];
    distance[30] = 21;
    // distance and A are shared; B is firstprivate (copy becomes 10):
    // inner A[0] = 99 + 10 + 30 = 139; outer adds 2 + 30 -> 171.
    _Pragma("omp parallel firstprivate(B) shared(distance, A) if(0)")
    {
      distance[30] += 9;
      B[0] += 8;
      A[0] += B[0] + distance[30];
    }
    A[0] += B[0] + distance[30];
  }, VERIFY(0, 1, A[i], 171));

  struct CITY {
    char name[128];
    int distance_to_nyc;
  };
  struct CONTEXT {
    struct CITY city;
    double A[N];
    double B[N];
  };
  struct CONTEXT data;

  //
  // Test: omp_get_thread_num()
  //
  // name[1] is 'o' == 111, so 111 + 21 == 132 is the base value below.
  // NOTE(review): strcpy is used but <string.h> is not included directly --
  // presumably pulled in via the utilities headers; confirm.
  strcpy(data.city.name, "dobbs ferry");
  data.city.distance_to_nyc = 21;
  ZERO(data.A);
  TEST({
    // Master in the serial section has thread id 0.
    _Pragma("omp parallel master")
    {
      int tid = omp_get_thread_num();
      data.A[tid] += tid + (int) data.city.name[1] + data.city.distance_to_nyc;
    }
    // Expecting to start 128 parallel threads.
    _Pragma("omp parallel num_threads(128)")
    {
      // Workers in parallel section have thread ids 0 ... 127
      int tid = omp_get_thread_num();
      data.A[1+tid] += 1+tid + (int) data.city.name[1] + data.city.distance_to_nyc;
    }
  }, VERIFY(0, 128, data.A[i], (132 + i)*(trial+1)));

  //
  // Test: sharing of struct from master to parallel region.
  //
  ZERO(data.A);
  ZERO(data.B);
  TEST({
    for (int i = 0; i < 128; i++) {
      data.B[i] = 0;
      data.A[i] = 99 + data.B[i];
      data.B[i] = 1;
    }
    _Pragma("omp parallel num_threads(128)")
    {
      int tid = omp_get_thread_num();
      data.A[tid] += 1;
      data.B[tid] += 2;
    }
    // data.A[i] = 99 + 1 + (1 + 2) == 103.
    for (int i = 0; i < 128; i++) {
      data.A[i] += data.B[i];
    }
  }, VERIFY(0, 128, data.A[i], 103));

  //
  // Test: struct private clause on parallel region.
  //
  ZERO(data.A);
  ZERO(data.B);
  TEST({
    for (int i = 0; i < 128; i++) {
      data.B[i] = 0;
      data.A[i] = 99 + data.B[i];
      data.B[i] = 1;
    }
    // The whole struct is private: all writes inside are discarded.
    _Pragma("omp parallel num_threads(128) private(data)")
    {
      int tid = omp_get_thread_num();
      data.A[tid] += 1;
      data.B[tid] = 2;
    }
    // data.A[i] = 99 + 1 == 100.
    for (int i = 0; i < 128; i++) {
      data.A[i] += data.B[i];
    }
  }, VERIFY(0, 1, data.A[i], 100));

  //
  // Test: struct firstprivate clause on parallel region.
  // FIXME: Currently we don't have support to share variables from
  // master to workers, so we're doing "serialized" parallel execution.
  //
  ZERO(data.A);
  ZERO(data.B);
  TEST({
    data.B[0] = 0;
    data.A[0] = 99 + data.B[0];
    data.B[0] = 2;
    double tmp;
    // The firstprivate copy of data: B[0] 2->10, A[0] 99->109 (discarded);
    // tmp is shared, so 109 escapes: 99 + 2 + 109 == 210.
    _Pragma("omp parallel firstprivate(data) if(0)")
    {
      data.B[0] += 8;
      data.A[0] += data.B[0];
      tmp = data.A[0];
    }
    data.A[0] += data.B[0] + tmp;
  }, VERIFY(0, 1, data.A[i], 210));

  //
  // Test: struct shared clause on parallel region.
  // FIXME: Currently we don't have support to share variables from
  // master to workers, so we're doing "serialized" parallel execution.
  //
  ZERO(A);
  ZERO(B);
  TEST({
    B[0] = 0;
    A[0] = 99 + B[0];
    B[0] = 2;
    struct CITY city;
    city.distance_to_nyc = 21;
    // city and A are shared; B is firstprivate (copy becomes 10):
    // inner A[0] = 99 + 10 + 30 = 139; outer adds 2 + 30 -> 171.
    _Pragma("omp parallel firstprivate(B) shared(city, A) if(0)")
    {
      city.distance_to_nyc += 9;
      B[0] += 8;
      A[0] += B[0] + city.distance_to_nyc;
    }
    A[0] += B[0] + city.distance_to_nyc;
  }, VERIFY(0, 1, A[i], 171));

  return 0;
}
omp_rw_lock.h
/*
  @copyright Russell Standish 2000-2013
  @author Russell Standish
  This file is part of EcoLab

  Open source licensed under the MIT license. See LICENSE for details.
*/

/**\file
\brief A read/write lock pattern for OpenMP
*/
#ifndef OMP_RW_LOCK_H
#define OMP_RW_LOCK_H
#ifdef _OPENMP
#include <omp.h>

namespace ecolab
{
  /// A read/write lock pattern for OpenMP
  class RWlock
  {
    /** stores which threads are read locked. Implies a maximum of 32
        threads are supported on 32 bit machines and 64 threads on 64 bit
        machines. */
    volatile unsigned long read_mask;
    omp_lock_t write_lock;
  public:
    RWlock(): read_mask(0) {omp_init_lock(&write_lock);}
    ~RWlock() {omp_destroy_lock(&write_lock);}

    /// mark this thread as a reader; blocks while a writer holds the lock.
    void lock_for_read() {
      omp_set_lock(&write_lock);
      // Fix: shift an unsigned long (1ul), not an int.  (1 <<
      // omp_get_thread_num()) is undefined behaviour for thread ids >= 31
      // and would break the documented 64-thread limit on 64-bit machines.
#pragma omp atomic
      read_mask |= (1ul << omp_get_thread_num());
      omp_unset_lock(&write_lock);
    }

    /// clear this thread's reader bit.
    void unlock_for_read() {
#pragma omp atomic
      read_mask &= ~(1ul << omp_get_thread_num());
    }

    /// acquire exclusive access; any read lock held by this thread is
    /// released first and re-established after the write lock is taken.
    void lock_for_write() {
      // remember whether this thread currently holds a read lock
      unsigned long read_set = read_mask & (1ul << omp_get_thread_num());
      unlock_for_read();
      omp_set_lock(&write_lock);
      //wait for read locks on other threads to be relinquished
      // NOTE(review): this spin relies on the volatile qualifier of
      // read_mask for visibility (no explicit omp flush) -- confirm on the
      // targeted compilers.
      while (read_mask);
#pragma omp atomic
      read_mask |= read_set; //reestablish previous read lock status.
    }

    /// release exclusive access.
    void unlock_for_write() {
      omp_unset_lock(&write_lock);
    }
  };

  /// apply a read lock to the current scope (prevents write lock from
  /// being taken)
  class read_lock
  {
    RWlock& lock;
  public:
    read_lock(RWlock& lock): lock(lock) {lock.lock_for_read();}
    ~read_lock() {lock.unlock_for_read();}
  };

  /// apply a write lock (exclusive access) to the current scope
  class write_lock
  {
    RWlock& lock;
  public:
    write_lock(RWlock& lock): lock(lock) {lock.lock_for_write();}
    ~write_lock() {lock.unlock_for_write();}
  };
}

#else  //!_OPENMP
// Without OpenMP there is no concurrency, so the locks collapse to no-ops
// with the same interface.
namespace ecolab
{
  class RWlock {};
  struct read_lock
  {
    read_lock(RWlock& lock) {}
  };
  struct write_lock
  {
    write_lock(RWlock& lock) {}
  };
}
#endif
#endif
GB_binop__land_bool.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// This instance is specialized for the LAND (logical AND) operator on bool;
// the actual loop bodies come from the #included *_template.c files, driven
// by the GB_* macros defined below.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__land_bool
// A.*B function (eWiseMult):       GB_AemultB__land_bool
// A*D function (colscale):         GB_AxD__land_bool
// D*A function (rowscale):         GB_DxB__land_bool
// C+=B function (dense accum):     GB_Cdense_accumB__land_bool
// C+=b function (dense accum):     GB_Cdense_accumb__land_bool
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__land_bool
// C=scalar+B                       GB_bind1st__land_bool
// C=scalar+B'                      GB_bind1st_tran__land_bool
// C=A+scalar                       GB_bind2nd__land_bool
// C=A'+scalar                      GB_bind2nd_tran__land_bool

// C type:   bool
// A type:   bool
// B,b type: bool

// BinaryOp: cij = (aij && bij)

#define GB_ATYPE \
    bool

#define GB_BTYPE \
    bool

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    bool bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x && y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LAND || GxB_NO_BOOL || GxB_NO_LAND_BOOL)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// LAND is not in that set, so no dense ewise3-accum kernel exists for this
// operator and this placeholder is compiled out.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__land_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__land_bool
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__land_bool
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type bool
        bool bwork = (*((bool *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // note: unreachable (the inner block always returns); kept as generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__land_bool
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__land_bool
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Frees the ek_slice workspaces allocated by the add/emult templates.
#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                     \
{                                                                       \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;  \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;  \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;  \
}

GrB_Info GB_AaddB__land_bool
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__land_bool
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__land_bool
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    bool   x = (*((bool *) x_input)) ;
    bool *Bx = (bool *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        bool bij = Bx [p] ;
        Cx [p] = (x && bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__land_bool
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    bool *Ax = (bool *) Ax_input ;
    bool   y = (*((bool *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        bool aij = Ax [p] ;
        Cx [p] = (aij && y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    bool aij = Ax [pA] ;            \
    Cx [pC] = (x && aij) ;          \
}

GrB_Info GB_bind1st_tran__land_bool
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        bool
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool x = (*((const bool *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent use
    #undef  GB_ATYPE
    #define GB_ATYPE \
        bool
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    bool aij = Ax [pA] ;            \
    Cx [pC] = (aij && y) ;          \
}

GrB_Info GB_bind2nd_tran__land_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool y = (*((const bool *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
RefTraceTools.h
/////////////////////////////////////////////////////////////////////////////// // SOFTWARE COPYRIGHT NOTICE AGREEMENT // // This software and its documentation are copyright (2013) by the // // Broad Institute. All rights are reserved. This software is supplied // // without any warranty or guaranteed support whatsoever. The Broad // // Institute is not responsible for its use, misuse, or functionality. // /////////////////////////////////////////////////////////////////////////////// #ifndef REFTRACE_TOOLS_H #define REFTRACE_TOOLS_H // MakeDepend: library OMP // MakeDepend: cflags OMP_FLAGS #include "Basevector.h" #include "CoreTools.h" #include "paths/HyperBasevector.h" #include "pairwise_aligners/SmithWatAffine.h" #include "pairwise_aligners/SmithWatBandedA.h" #include "paths/long/MakeKmerStuff.h" #include "PrintAlignment.h" #include "paths/long/RefTrace.h" // Create a HyperBasevector hbp that equals hb plus its reverse complement. // However only do this for components that need it. void CreateHBPlus(const HyperBasevector& hb, const vec<int>& inv, HyperBasevector& hbp, vec<pair<int,Bool>>& hbp_to_hb); // Linearized reference sequences. Expand the paths from the source to the sink // of the reference graph, built a vecbasevector of expended sequence, and // record the origion of of the chromosome id. class LinearRef { public: LinearRef(const vec<HyperBasevector>& GH,const vec<bool>& c=vec<bool>()); int N() const { return G.size(); } int Source(int g) const {return G_source[g]; } bool IsDoubled(int g) const {return isDoubled[g]; } const basevector& Seq(int g) const { return G[g]; } const vecbasevector& Seqs() const { return G; } private: vec<int> G_source; vecbasevector G; vec<bool> isDoubled; }; // Some data structures. 
// The structure vedata has the following structure: // { (genome_tig_id, start_pos_on_genome_tig, left_vertex_of_edge_in_hbp ), // (genome_tig_id, stop_pos_on_genome_tig-K+1, right_vertex_of_edge_in_hbp ), // (hbp_edge_id, error_count), // (start_pos_on_hbp_edge, stop_pos_on_hbp_edge) }. class EdgePlacements { public: EdgePlacements(const HyperBasevector& hbp, const vec<pair<int,Bool>>& hbp_to_hb, const vecbasevector& G) : hbp(hbp), hbp_to_hb(hbp_to_hb), G(G) {} // Align edges of hbp to reference. template<int L> void AlignEdgesToRef( // heuristics: const double min_cov_frac, const double max_error_rate, const int max_offset_diff, const double min_group_frac, const int offset_add, const int min_group_save, const Bool fix_bug, // logging: bool REFTRACE_VARIANTS, const int verbosity, ostream& out ); template <int L> void AlignEdgesToRefExp(const int verbosity, ostream& out); void RemoveBadPlacements(); void Twiddle(const int max_twiddle); void TwiddleSmart(); // Generate the matching sequences from the best path. basevector BestSeq(const vec<int>& best_path, const vec<int>& eids , const vec<std::pair<int,int>>& limits , vec<std::tuple<int64_t,int64_t,int,int64_t,int64_t,int64_t,int64_t>>& coors_edge); public: const HyperBasevector& hbp; const vec<pair<int,Bool>>& hbp_to_hb; const vecbasevector& G; vec< quad< triple<int,int,int>, triple<int,int,int>, pair<int,int>, pair<int,int> > > vedata; vec<align> aligns; vec<int> xto_left, xto_right; private: int CorrelatePositionsAlways(const align& a, const int x1)const; }; class GraphZ { public: typedef int (*PenaltyFuncT)(int, int, int); GraphZ(const EdgePlacements& ep, PenaltyFuncT pf) : edge_placements(ep), hbp(ep.hbp), hbp_to_hb(ep.hbp_to_hb), G(ep.G) { Penalty = pf; } void FindShortestPath(const int min_dist, const int max_dist, vec< vec<int> >& spaths, vec< triple<int,int,int> >& spaths_egd, vec< pair<int,int> >& spaths_gg_pen, ostream& out, int verbosity = 0); // Find the corresponding best path in hbp edges. 
void FindBestEdgePath( const vec< triple<int,int,int> >& spaths_egd, const vec< vec<int> >& spaths, vec<vec<int>>& best_path, vec<vec<int>>& eids, int& best_g) ; public: const EdgePlacements& edge_placements; const HyperBasevector& hbp; const vec<pair<int,Bool>>& hbp_to_hb; const vecbasevector& G; PenaltyFuncT Penalty; vec< triple<int,int,int> > verts; vec< triple< int, int, pair<int,int> > > edges; vec< triple<int,int,int> > egd; digraphE<int> Z; private: void BuildGraph(const int verbosity, ostream& out); void AddGapEdges(const int min_dist, const int max_dist, const int verbosity, ostream& out ,const bool bPreserveDisconnectedComponents=false); void AddConnectedGapEdges(const int min_dist, const int max_dist, const int verbosity, ostream& out ,const bool bPreserveDisconnectedComponents=false); void AddSpecialVerts( const int K, const vec<int>& sources, const vec<int>& sinks, const bool bPreserveDisconnectedComponents=false); void AnnouncePaths( const vec< vec<int> >& spaths, const int K, const vec< triple<int,int,int> >& spaths_egd, const int verbosity, ostream& out ) const; void FindShortestPathBetween( const int this_source, const int this_sink, const digraphE<int>& ZS, const vec<int>& suc, vec< vec<int> >& spaths, vec< triple<int,int,int> >& spaths_egd, vec< pair<int,int> >& spaths_gg_pen, const int verbosity, ostream& out ) const; void MakeZDot(ostream& os); }; template<int L> void EdgePlacements::AlignEdgesToRef( const double min_cov_frac, const double max_error_rate, const int max_offset_diff, const double min_group_frac, const int offset_add, const int min_group_save, const Bool fix_bug, // logging: bool REFTRACE_VARIANTS, const int verbosity, ostream& out ) { // Setup for alignment. 
vecbasevector all(G); vec< triple<kmer<L>,int,int> > kmers_plus; MakeKmerLookup0( all, kmers_plus ); vec< kmer<L> > kmers( kmers_plus.size( ) ); for ( int64_t i = 0; i < kmers_plus.jsize( ); i++ ) kmers[i] = kmers_plus[i].first; hbp.ToLeft(xto_left), hbp.ToRight(xto_right); // Go through the edges of the (doubled) assembly. #pragma omp parallel for schedule(dynamic,1) for ( int i = 0; i < hbp.EdgeObjectCount( ); i++ ) { const basevector& e = hbp.EdgeObject(i); // For each kmer in the edge, find its hits to the reference and find // the kmers having the most hits. int nkmers = e.isize( ) - L + 1; vec< triple<int64_t,int64_t,int64_t> > locs(nkmers); vec<int> pos( nkmers, vec<int>::IDENTITY ); kmer<L> x; for ( int j = 0; j < nkmers; j++ ) { x.SetToSubOf( e, j ); int64_t low = LowerBound(kmers, x), high = UpperBound(kmers, x); locs[j].first = high - low; locs[j].second = low, locs[j].third = high; } if (fix_bug) ReverseSortSync( locs, pos ); else SortSync( locs, pos ); // Determine cutoff 'top'. double mcf = min_cov_frac; if ( REFTRACE_VARIANTS ) mcf = 0.6; int t = int( floor( nkmers * mcf ) ), top; for ( top = t + 1; top < nkmers; top++ ) if ( locs[top].first > locs[t].first ) break; // Find the associated offsets. vec< pair<int,int> > offset; for ( int j = 0; j < top; j++ ) { for ( int64_t m = locs[j].second; m < locs[j].third; m++ ) { int g = kmers_plus[m].second, o = kmers_plus[m].third - pos[j]; offset.push( g, o ); } } Sort(offset); // Form offsets into groups. 
vec< triple< int, int, pair<int,int> > > og; int mod = max_offset_diff; if ( REFTRACE_VARIANTS ) mod = 500; for ( int j = 0; j < offset.isize( ); j++ ) { int k; for ( k = j + 1; k < offset.isize( ); k++ ) { if ( offset[k].first != offset[j].first ) break; if ( offset[k].second - offset[k-1].second > mod ) break; } og.push( k - j, offset[j].first, make_pair( offset[j].second, offset[k-1].second ) ); j = k - 1; } ReverseSort(og); if ( verbosity >= 4 ) { #pragma omp critical { out << "\noriginal edge " << hbp_to_hb[i].first << ": "; PRINT4_TO( out, nkmers, top, offset.size( ), og.size( ) ); for ( int j = 0; j < og.isize( ); j++ ) PRINT2_TO( out, j, og[j].first ); } } // Filter offset groups. double mgf = min_group_frac; if ( REFTRACE_VARIANTS ) mgf = 0.65; int gj; for ( gj = 0; gj < og.isize( ); gj++ ) { if ( og[gj].first < min_group_save && og[gj].first < mgf * og[0].first ) { break; } } og.resize(gj); if ( verbosity >= 3 && og.nonempty( ) ) { #pragma omp critical { out << "\noffsets for edge " << i << " (hb_edge=" << hbp_to_hb[i].first << ", nkmers=" << nkmers << ")" << endl; for ( int j = 0; j < og.isize( ); j++ ) { out << "[" << j << "] " << og[j].second << "." << og[j].third.first << "-" << og[j].third.second << " (" << og[j].first << ")" << endl; } } } // Align. The reason for adding to the offset is that there could be in // indel in the first or last L bases. for ( int j = 0; j < og.isize( ); j++ ) { int g = og[j].second; int off_low = og[j].third.first, off_high = og[j].third.second; int mid_offset = ( off_low + off_high ) / 2; int bandwidth = Max(mid_offset - off_low, off_high - mid_offset) + offset_add; // Do the alignment. This is kludgy. If the alignment has too // many errors and the edge is long, we suspect that the problem // might be with a big indel, so we align using a larger bandwidth. // Note the unfortunate us of hardcoded constants. 
align a; int errors; if ( !REFTRACE_VARIANTS ) { const int SMA_method = 1; if (SMA_method == 1) { SmithWatBandedA( hbp.EdgeObject(i), G[g], -mid_offset, bandwidth, a, errors, 0, 1, 1 ); } else if(SMA_method == 2) { SmithWatAffineBanded( hbp.EdgeObject(i), G[g], -mid_offset, bandwidth, a, errors ); } else { cout << "unrecognized SMA_method" << endl; } if ( double(errors) / double( a.extent2( ) ) > max_error_rate ) { // So the following code (after the continue; // bandwidth=5000) was taking a ton of time // (0.5-1 sec per alignment). Also in my tests // it had a very low success rate <0.5% AND its // removal does not seem to impact the result. // We should do something clever with the // alignments (super aligner?) if we end up // needing it. -- neilw continue; #if 0 const int long_edge = 5000; const int max_indel = 5000; if ( hbp.EdgeLengthBases(i) < long_edge ) continue; SmithWatBandedA( hbp.EdgeObject(i), G[g], -mid_offset, max_indel, a, errors, 0, 1, 1 ); if ( double(errors) / double( a.extent2( ) ) > max_error_rate ) continue; #endif } } else { double score = SmithWatAffineBanded( hbp.EdgeObject(i), G[g], -mid_offset, bandwidth, a, errors ) / 3.0; if ( verbosity >= 3 ) { #pragma omp critical { double err_rate = score / double( a.extent2( ) ); int hb_edge = hbp_to_hb[i].first; int offset = -mid_offset; PRINT5( hb_edge, offset, bandwidth, score, err_rate ); } } double var_max_error_rate = 0.3; if ( score / double( a.extent2( ) ) > var_max_error_rate ) continue; } if ( verbosity >= 3 ) { #pragma omp critical { out << "\nalignment " << j << " of edge " << i << " (" << xto_left[i] << " --> " << xto_right[i] << ", hb_edge=" << hbp_to_hb[i].first << ")" << endl; vec<int> errs = a.MutationsGap1Gap2( hbp.EdgeObject(i), G[g] ); int mismatches = errs[0]; int indels = errs[1] + errs[2]; PRINT5_TO( out, g, a.pos2( ), a.Pos2( ), mismatches, indels ); if ( verbosity == 4 ) { PrintVisualAlignment( True, out, hbp.EdgeObject(i), G[g], a ); } if ( verbosity >= 5 ) { 
PrintVisualAlignment( False, out, hbp.EdgeObject(i), G[g], a ); } } } // Figure out where the position e.isize( ) - K + 1 should map to // under the alignment. Note that because there could be an indel // there, this is not necessarily a meaningful answer. int x1 = e.isize( ) - hbp.K( ) + 1; int x2 = CorrelatePositionsAlways( a, x1 ); // Save results. #pragma omp critical { vedata.push( make_triple( g, a.pos2( ), xto_left[i] ), make_triple( g, x2, xto_right[i] ), make_pair( i, errors ), make_pair( a.pos1( ), a.Pos1( ) ) ); aligns.push_back(a); } } } // Sort the output to avoid the stochastic downstream behavior of BuildGraph // that seems depend on the input order of the alignment data. SortSync(vedata, aligns); } // An experimental version of function to align edges to reference that // automatically adjust heuristics for best results. template<int L> void EdgePlacements::AlignEdgesToRefExp(const int verbosity, ostream& out) { // Setup for alignment. vecbasevector all(G); vec< triple<kmer<L>,int,int> > kmers_plus; MakeKmerLookup0( all, kmers_plus ); vec< kmer<L> > kmers( kmers_plus.size( ) ); for ( int64_t i = 0; i < kmers_plus.jsize( ); i++ ) kmers[i] = kmers_plus[i].first; hbp.ToLeft(xto_left), hbp.ToRight(xto_right); unsigned int max_g_len = G.front().size(); for(size_t gg=1;gg<G.size();++gg){max_g_len=max(max_g_len,G[gg].size());} vec<std::pair<int,int>> permutation(hbp.EdgeObjectCount()); for(int ii=0;ii<hbp.EdgeObjectCount();++ii){ permutation[ii]=std::make_pair(hbp.EdgeObject(ii).isize(),ii);} std::sort(permutation.rbegin(),permutation.rend()); //very dirty way of load balance, should be coded with a worklist.h instead. 
typedef triple< int, int, pair<int,int> > og_type; // the og specification from old code typedef std::tuple<og_type,double,int> work_type; // og_type, max_error_rate, offset_add vec< vec<work_type> > ee_vec_work( hbp.EdgeObjectCount() ); // edge_idx -> a list of work_type vec< std::pair<size_t,size_t> > unit_idx_tt_vec_work; // flattened indices of ee_vec_work const int np=3;//number of passes #pragma omp parallel { SmithWatBandedAEngine swbae(sqrt(max_g_len)*2,sqrt(max_g_len)); #pragma omp for schedule(dynamic,1) for ( int ee = 0; ee < hbp.EdgeObjectCount( ); ee++ ) { int i=permutation[ee].second; const basevector& e = hbp.EdgeObject(i); // For each kmer in the edge, find its hits to the reference and find // the kmers having the most hits. int nkmers = e.isize( ) - L + 1; vec< triple<int64_t,int64_t,int64_t> > locs(nkmers); vec<int> pos( nkmers, vec<int>::IDENTITY ); kmer<L> x; for ( int j = 0; j < nkmers; j++ ) { x.SetToSubOf( e, j ); int64_t low = LowerBound(kmers, x), high = UpperBound(kmers, x); locs[j].first = high - low; locs[j].second = low, locs[j].third = high; } ReverseSortSync( locs, pos ); // Determine cutoff 'top'. double min_cov_frac = 0.5; int t = int( floor( nkmers * min_cov_frac ) ), top; for ( top = t + 1; top < nkmers; top++ ) if ( locs[top].first > locs[t].first ) break; // Find the associated offsets. 
vec< pair<int,int> > offset; for ( int j = 0; j < top; j++ ) { for ( int64_t m = locs[j].second; m < locs[j].third; m++ ) { int g = kmers_plus[m].second, o = kmers_plus[m].third - pos[j]; offset.push( g, o ); } } Sort(offset); for(int pass = 0; pass < np; pass++) { // auto pt = getenv("PASS"); // if (pt) { // pass = atoi(pt); // np = 1; // cout << "pass= " << pass << endl; // } RefTraceHeuristics rth; switch (pass) { case 0: //rth.max_offset_diff = 10; // default //rth.max_error_rate = 0.05; //rth.offset_add = 1; // default //rth.max_twiddle = 3; // default rth.min_group_frac = 0.1; rth.min_group_save = 200; break; case 1: rth.max_offset_diff = 30; rth.max_error_rate = 0.31; rth.offset_add = 5; rth.min_group_frac = 0.1; rth.max_twiddle = 5; break; case 2: rth.max_offset_diff = 350; rth.max_error_rate = 0.31; rth.offset_add = 5; rth.min_group_frac = 0.75; rth.max_twiddle = 120; break; } // Form offsets into groups. vec< triple< int, int, pair<int,int> > > og; for ( int j = 0; j < offset.isize( ); j++ ) { int k; for ( k = j + 1; k < offset.isize( ); k++ ) { if ( offset[k].first != offset[j].first ) break; if ( offset[k].second - offset[k-1].second > rth.max_offset_diff ) break; } og.push( k - j, offset[j].first, make_pair( offset[j].second, offset[k-1].second ) ); j = k - 1; } ReverseSort(og); // Filter offset groups. 
int gj; for ( gj = 0; gj < og.isize( ); gj++ ) { if ( og[gj].first < rth.min_group_save && og[gj].first < rth.min_group_frac * og[0].first ) { break; } } og.resize(gj); for( const auto& entry: og ){ ee_vec_work[i].emplace_back( entry , rth.max_error_rate, rth.offset_add); } } } { #pragma omp barrier } #pragma omp master { const size_t n=std::accumulate(ee_vec_work.begin(),ee_vec_work.end(),size_t(0),[](size_t a,vec<work_type>const&b){return a+b.size();}); unit_idx_tt_vec_work.reserve(n); for(const auto& entry: permutation){ for(size_t ff=0;ff<ee_vec_work[entry.second].size();++ff){ unit_idx_tt_vec_work.emplace_back(entry.second,ff); } } } { #pragma omp barrier } #pragma omp for schedule(dynamic,1) nowait for ( size_t og_idx = 0 ; og_idx < unit_idx_tt_vec_work.size() ; ++og_idx) { { // Align. The reason for adding to the offset is that there could be in // indel in the first or last L bases. // for ( int j = 0; j < og.isize( ); j++ ) { const auto& indices = unit_idx_tt_vec_work[og_idx]; const auto& entry = ee_vec_work[indices.first][indices.second]; // int g = og[j].second; // int off_low = og[j].third.first, off_high = og[j].third.second; int g = std::get<0>(entry).second; int off_low = std::get<0>(entry).third.first, off_high = std::get<0>(entry).third.second; int mid_offset = ( off_low + off_high ) / 2; int bandwidth = Max(mid_offset - off_low, off_high - mid_offset) + std::get<2>(entry);// rth.offset_add; // Do the alignment. This is kludgy. If the alignment has too // many errors and the edge is long, we suspect that the problem // might be with a big indel, so we align using a larger bandwidth. // Note the unfortunate us of hardcoded constants. 
align a; int errors; swbae.run( hbp.EdgeObject(indices.first), G[g], -mid_offset, bandwidth, a, errors, 0, 1, 1 ); if ( double(errors) / double( a.extent2( ) ) > std::get<1>(entry) /*rth.max_error_rate*/ ) { const int long_edge = 5000; const int max_indel = 5000; if ( hbp.EdgeLengthBases(indices.first) < long_edge ) continue; swbae.run( hbp.EdgeObject(indices.first), G[g], -mid_offset, max_indel, a, errors, 0, 1, 1 ); if ( double(errors) / double( a.extent2( ) ) > std::get<1>(entry)/*rth.max_error_rate*/ ) continue; } // errors += a.pos1(); // errors += hbp.EdgeObject(i).size()-a.Pos1(); // Figure out where the position e.isize( ) - K + 1 should map to // under the alignment. Note that because there could be an indel // there, this is not necessarily a meaningful answer. int x1 = hbp.EdgeObject(indices.first).isize( ) - hbp.K( ) + 1; int x2 = CorrelatePositionsAlways( a, x1 ); #pragma omp critical { vedata.push( make_triple( g, a.pos2( ), xto_left[indices.first] ), make_triple( g, x2, xto_right[indices.first] ), make_pair( indices.first, errors ), make_pair( a.pos1( ), a.Pos1( ) ) ); aligns.push_back(a); } } } } }//omp parallel // Sort the output to avoid the stochastic downstream behavior of BuildGraph // that seems depend on the input order of the alignment data. UniqueSortSync(vedata, aligns); } #endif
GB_unop__identity_uint16_uint32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_uint16_uint32
// op(A') function:  GB_unop_tran__identity_uint16_uint32

// C type:   uint16_t
// A type:   uint32_t
// cast:     uint16_t cij = (uint16_t) aij
// unaryop:  cij = aij

// The macros below are consumed by the shared kernel templates (for example
// GB_unop_transpose.c, included at the bottom of this file); they specialize
// the generic template to this operator/type combination.

// type of the A matrix entries
#define GB_ATYPE \
    uint32_t

// type of the C matrix entries
#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint32_t aij = Ax [pA]

// address/reference of the pC-th entry of C
#define GB_CX(p) Cx [p]

// unary operator (identity: z is just x)
#define GB_OP(z, x) \
    z = x ;

// casting (uint32_t -> uint16_t, value truncated modulo 2^16)
#define GB_CAST(z, aij) \
    uint16_t z = (uint16_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint32_t aij = Ax [pA] ;        \
    /* Cx [pC] = op (cast (aij)) */ \
    uint16_t z = (uint16_t) aij ;   \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise: Cx [p] = (uint16_t) Ax [p] for p in [0, anz).
// Returns GrB_SUCCESS, or GrB_NO_VALUE when this kernel is compiled out
// (GB_DISABLE), in which case the caller falls back to the generic kernel.

GrB_Info GB_unop_apply__identity_uint16_uint32
(
    uint16_t *Cx,       // Cx and Ax may be aliased
    const uint32_t *Ax, // input values
    int64_t anz,        // number of entries in Ax (and Cx)
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // loop index declared before the pragma (style used by these kernels)
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint32_t aij = Ax [p] ;
        uint16_t z = (uint16_t) aij ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Phase 2 of the sliced transpose: the actual work is in the shared template
// GB_unop_transpose.c, specialized via the macros defined above.

GrB_Info GB_unop_tran__identity_uint16_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // per-slice row counts (from phase 1)
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice, // slice boundaries of A
    int naslice                         // number of slices
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
gsrb.ompfor.c
//------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
// Compile-time selection of the GSRB (Gauss-Seidel Red-Black) inner-loop
// variant.  Exactly one of GSRB_FP / GSRB_STRIDE2 / GSRB_BRANCH is active;
// GSRB_OOP additionally selects an out-of-place sweep that ping-pongs between
// x and VECTOR_TEMP.
#if defined(GSRB_FP)
#warning Overriding default GSRB implementation and using pre-computed 1.0/0.0 FP array for Red-Black to facilitate vectorization...
#elif defined(GSRB_STRIDE2)
#if defined(GSRB_OOP)
#warning Overriding default GSRB implementation and using out-of-place and stride-2 accesses to minimize the number of flops
#else
#warning Overriding default GSRB implementation and using stride-2 accesses to minimize the number of flops
#endif
#elif defined(GSRB_BRANCH)
#if defined(GSRB_OOP)
#warning Overriding default GSRB implementation and using out-of-place implementation with an if-then-else on loop indices...
#else
#warning Overriding default GSRB implementation and using if-then-else on loop indices...
#endif
#else
#define GSRB_STRIDE2 // default implementation
#endif
//------------------------------------------------------------------------------------------------------------------------------
// Apply 2*NUM_SMOOTHS GSRB sweeps to vector x_id on this level.
//   level  : the grid level being smoothed (boxes, strides, vectors, timers).
//   x_id   : index of the solution vector being smoothed.
//   rhs_id : index of the right-hand-side vector.
//   a, b   : operator coefficients; consumed (together with alpha/beta_*/h2inv)
//            inside the apply_op_ijk macro defined elsewhere — assumed to be
//            the Helmholtz coefficients a*alpha*x - b*div(beta*grad x); verify
//            against the operator header that defines apply_op_ijk.
void smooth(level_type * level, int x_id, int rhs_id, double a, double b){
  int s;
  for(s=0;s<2*NUM_SMOOTHS;s++){ // there are two sweeps per GSRB smooth

    // exchange the ghost zone...
    #ifdef GSRB_OOP // out-of-place GSRB ping pongs between x and VECTOR_TEMP
    if((s&1)==0){exchange_boundary(level,       x_id,stencil_get_shape());apply_BCs(level,       x_id,stencil_get_shape());}
            else{exchange_boundary(level,VECTOR_TEMP,stencil_get_shape());apply_BCs(level,VECTOR_TEMP,stencil_get_shape());}
    #else // in-place GSRB only operates on x
                 exchange_boundary(level,       x_id,stencil_get_shape());apply_BCs(level,       x_id,stencil_get_shape());
    #endif

    // apply the smoother...
    double _timeStart = getTime();

    int box;
    for(box=0;box<level->num_my_boxes;box++){ // loop over all boxes this process owns...
      const double h2inv = 1.0/(level->h*level->h);
      const int ghosts  = level->box_ghosts;
      const int jStride = level->box_jStride;
      const int kStride = level->box_kStride;
      // Red/black parity of element (0,0,0) of this box for *THIS* sweep:
      // box origin parity XOR'd with the sweep index s.
      const int color000 = (level->my_boxes[box].low.i^level->my_boxes[box].low.j^level->my_boxes[box].low.k^s)&1; // is element 000 red or black on *THIS* sweep
      // All pointers are offset so that [0] is the first non-ghost-zone point.
      const double * __restrict__ rhs    = level->my_boxes[box].vectors[       rhs_id] + ghosts*(1+jStride+kStride);
      const double * __restrict__ alpha  = level->my_boxes[box].vectors[VECTOR_ALPHA ] + ghosts*(1+jStride+kStride);
      const double * __restrict__ beta_i = level->my_boxes[box].vectors[VECTOR_BETA_I] + ghosts*(1+jStride+kStride);
      const double * __restrict__ beta_j = level->my_boxes[box].vectors[VECTOR_BETA_J] + ghosts*(1+jStride+kStride);
      const double * __restrict__ beta_k = level->my_boxes[box].vectors[VECTOR_BETA_K] + ghosts*(1+jStride+kStride);
      const double * __restrict__ Dinv   = level->my_boxes[box].vectors[VECTOR_DINV  ] + ghosts*(1+jStride+kStride);
      #ifdef GSRB_OOP
      // Out-of-place: read from one buffer, write the other; which is which
      // alternates with the sweep parity (matching the ghost exchange above).
      const double * __restrict__ x_n;
            double * __restrict__ x_np1;
      if((s&1)==0){x_n   = level->my_boxes[box].vectors[         x_id] + ghosts*(1+jStride+kStride);
                   x_np1 = level->my_boxes[box].vectors[VECTOR_TEMP  ] + ghosts*(1+jStride+kStride);}
              else{x_n   = level->my_boxes[box].vectors[VECTOR_TEMP  ] + ghosts*(1+jStride+kStride);
                   x_np1 = level->my_boxes[box].vectors[         x_id] + ghosts*(1+jStride+kStride);}
      #else
      const double * __restrict__ x_n    = level->my_boxes[box].vectors[         x_id] + ghosts*(1+jStride+kStride); // i.e. [0] = first non ghost zone point
            double * __restrict__ x_np1  = level->my_boxes[box].vectors[         x_id] + ghosts*(1+jStride+kStride); // i.e. [0] = first non ghost zone point
      #endif

      // Loop indices declared outside the loops so they can be named in the
      // OpenMP private() clause below.
      int i,j,k;
      #pragma omp parallel for private(i,j,k) schedule(static,1) // chunksize=1 implies the collection of threads gets a slab (spatial locality in k exploited in LLC)
      //#pragma omp parallel for private(i,j,k) // Default schedule chunksize implies each thread gets a slab and inter-thread locality may not be exploited
      for(k=0;k<level->box_dim;k++){
      for(j=0;j<level->box_dim;j++){
        #if defined(GSRB_FP)
        // Branch-free variant: multiply by a precomputed 1.0/0.0 red-black
        // mask so the i-loop vectorizes.
        const double * __restrict__ RedBlack = level->RedBlack_FP + ghosts*(1+jStride) + kStride*((k^color000)&0x1);
        for(i=0;i<level->box_dim;i++){
          int ij  = i + j*jStride;
          int ijk = i + j*jStride + k*kStride;
          double Ax     = apply_op_ijk(x_n);
          x_np1[ijk] = x_n[ijk] + RedBlack[ij]*Dinv[ijk]*(rhs[ijk]-Ax);
          //x_np1[ijk] = ((i^j^k^color000)&1) ? x_n[ijk] : x_n[ijk] + Dinv[ijk]*(rhs[ijk]-Ax);
        } // i
        #elif defined(GSRB_STRIDE2)
        #ifdef GSRB_OOP
        // out-of-place must copy old value...
        for(i=0;i<level->box_dim;i++){
          int ijk = i + j*jStride + k*kStride;
          x_np1[ijk] = x_n[ijk];
        } // i copy
        #endif
        // Visit only the cells of the current color: start at the row's
        // parity offset and step by 2.
        for(i=((j^k^color000)&1);i<level->box_dim;i+=2){ // stride-2 GSRB
          int ijk = i + j*jStride + k*kStride;
          double Ax     = apply_op_ijk(x_n);
          x_np1[ijk] = x_n[ijk] + Dinv[ijk]*(rhs[ijk]-Ax);
        } // i stencil
        #elif defined(GSRB_BRANCH)
        for(i=0;i<level->box_dim;i++){
          int ijk = i + j*jStride + k*kStride;
          if((i^j^k^color000^1)&1){ // looks very clean when [0] is i,j,k=0,0,0
            double Ax     = apply_op_ijk(x_n);
            x_np1[ijk] = x_n[ijk] + Dinv[ijk]*(rhs[ijk]-Ax);
          #ifdef GSRB_OOP
          }else{
            x_np1[ijk] = x_n[ijk]; // copy old value when sweep color != cell color
          #endif
          }
        } // i
        #else
        #error no GSRB implementation was specified
        #endif
      }} // j,k
    } // boxes
    level->timers.smooth += (double)(getTime()-_timeStart);
  } // s-loop
}
//------------------------------------------------------------------------------------------------------------------------------
LAGraph_tricount.c
//------------------------------------------------------------------------------ // LAGraph_tricount: count the number of triangles in a graph //------------------------------------------------------------------------------ /* LAGraph: graph algorithms based on GraphBLAS Copyright 2020 LAGraph Contributors. (see Contributors.txt for a full list of Contributors; see ContributionInstructions.txt for information on how you can Contribute to this project). All Rights Reserved. NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. Released under a BSD license, please see the LICENSE file distributed with this Software or contact permission@sei.cmu.edu for full terms. Created, in part, with funding and support from the United States Government. (see Acknowledgments.txt file). This program includes and/or can make use of certain third party source code, object code, documentation and other files ("Third Party Software"). See LICENSE file for more details. */ //------------------------------------------------------------------------------ // LAGraph_tricount: count the number of triangles in a graph, // Contributed by Tim Davis, Texas A&M. // Given a symmetric graph A with no-self edges, LAGraph_tricount counts the // number of triangles in the graph. A triangle is a clique of size three, // that is, 3 nodes that are all pairwise connected. // One of 6 methods are used, defined below where L and U are the strictly // lower and strictly upper triangular parts of the symmetrix matrix A, // respectively. 
Each method computes the same result, ntri: // 1: Burkhardt: ntri = sum (sum ((A^2) .* A)) / 6 // 2: Cohen: ntri = sum (sum ((L * U) .* A)) / 2 // 3: Sandia: ntri = sum (sum ((L * L) .* L)) // 4: Sandia2: ntri = sum (sum ((U * U) .* U)) // 5: SandiaDot: ntri = sum (sum ((L * U') .* L)). Note that L=U'. // 6: SandiaDot2: ntri = sum (sum ((U * L') .* U)). Note that U=L'. // A is a square symmetric matrix, of any type. Its values are ignored, // (assuming v3.2.0 of SuiteSparse:GraphBLAS is used); otherwise, A must be // binary. Results are undefined for methods 1 and 2 if self-edges exist in A. // Results are undefined for all methods if A is unsymmetric. // TODO use an enum for the above methods. // All matrices are assumed to be in CSR format (GxB_BY_ROW in // SuiteSparse:GraphBLAS). The 6 methods work fine if the matrices are in CSC // format; just the underlying algorithms employed inside SuiteSparse:GraphBLAS // will differ (dot product vs saxpy, for example). If L and U are in CSC // format, then the "Dot" methods would use an outer product approach, which is // slow in SuiteSparse:GraphBLAS (requiring an explicit transpose). The // auto-sort rule probably needs to be reversed, if A is in CSC format (this is // not yet tested). // Methods 1 and 2 are much slower than methods 3 to 6 and take more memory. // Methods 3 to 6 take a little less memory than methods 1 and 2, are by far // the fastest methods in general. The methods 3 and 5 compute the same // intermediate matrix (L*L), and differ only in the way the matrix // multiplication is done. Method 3 uses an outer-product method (Gustavson's // method). Method 5 uses dot products (assuming both matrices are in CSR // format) and does not explicitly transpose U. 
They are called the "Sandia" // method since matrices in the KokkosKernels are stored in compressed-sparse // row form, so (L*L).*L in the KokkosKernel method is equivalent to (L*L).*L // in SuiteSparse:GraphBLAS when the matrices in SuiteSparse:GraphBLAS are in // their default format (also by row). // The new GxB_PAIR_INT64 binary operator in SuiteSparse:GraphBLAS v3.2.0 is // used in the semiring, if available. This is the function f(x,y)=1, so the // values of A are not accessed. They can have any values and any type. Only // the structure of A. Otherwise, without this operator, the input matrix A // must be binary. // Reference: Wolf, Deveci, Berry, Hammond, Rajamanickam, 'Fast linear algebra- // based triangle counting with KokkosKernels', IEEE HPEC'17, // https://dx.doi.org/10.1109/HPEC.2017.8091043, #include "LAGraph_internal.h" #include "GB_msort_2.h" //------------------------------------------------------------------------------ // tricount_prep: construct L and U //------------------------------------------------------------------------------ #undef LAGRAPH_FREE_ALL #define LAGRAPH_FREE_ALL \ GrB_free (&thunk) ; \ GrB_free (L) ; \ GrB_free (U) ; static GrB_Info tricount_prep ( GrB_Matrix *L, GrB_Matrix *U, GrB_Matrix A ) { GrB_Index n, *I = NULL, *J = NULL ; bool *X = NULL ; #if defined ( GxB_SUITESPARSE_GRAPHBLAS ) \ && ( GxB_IMPLEMENTATION >= GxB_VERSION (3,0,1) ) //---------------------------------------------------------------------- // build L and/or U with GxB_select //---------------------------------------------------------------------- GxB_Scalar thunk ; LAGr_Matrix_nrows (&n, A) ; LAGr_Scalar_new (&thunk, GrB_INT64) ; if (L != NULL) { // L = tril (A,-1) LAGr_Matrix_new (L, GrB_BOOL, n, n) ; LAGr_Scalar_setElement (thunk, -1) ; LAGr_select (*L, NULL, NULL, GxB_TRIL, A, thunk, NULL) ; } if (U != NULL) { // U = triu (A,1) LAGr_Matrix_new (U, GrB_BOOL, n, n) ; LAGr_Scalar_setElement (thunk, 1) ; LAGr_select (*U, NULL, NULL, GxB_TRIU, A, thunk, 
NULL) ; } LAGr_free (&thunk) ; #else //---------------------------------------------------------------------- // build L and U with extractTuples (slower than GxB_select) //---------------------------------------------------------------------- GrB_Vector thunk ; LAGr_Matrix_nrows (&n, A) ; if (L != NULL || U != NULL) { GrB_Index nvals ; LAGr_Matrix_nvals (&nvals, A) ; I = LAGraph_malloc (nvals, sizeof (GrB_Index)) ; J = LAGraph_malloc (nvals, sizeof (GrB_Index)) ; X = LAGraph_malloc (nvals, sizeof (bool)) ; if (I == NULL || J == NULL || X == NULL) { LAGRAPH_ERROR ("out of memory") ; } LAGr_Matrix_extractTuples (I, J, X, &nvals, A) ; // remove entries in the upper triangular part nedges = 0 ; for (int64_t k = 0 ; k < nvals ; k++) { if (I [k] > J [k]) { // keep this entry I [nedges] = I [k] ; J [nedges] = J [k] ; X [nedges] = X [k] ; nedges++ ; } } if (L != NULL) { LAGr_Matrix_new (L, GrB_BOOL, n, n) ; LAGr_Matrix_build (*L, I, J, X, nedges, GrB_LOR) ; } if (U != NULL) { LAGr_Matrix_new (U, GrB_BOOL, n, n) ; LAGr_Matrix_build (*U, J, I, X, nedges, GrB_LOR) ; } LAGRAPH_FREE (I) ; LAGRAPH_FREE (J) ; LAGRAPH_FREE (X) ; } #endif } //------------------------------------------------------------------------------ // LAGraph_tricount: count the number of triangles in a graph //------------------------------------------------------------------------------ #undef LAGRAPH_FREE_ALL #define LAGRAPH_FREE_ALL \ GrB_free (&C) ; \ GrB_free (&L) ; \ GrB_free (&T) ; \ GrB_free (&U) ; \ LAGRAPH_FREE (W0) ; \ LAGRAPH_FREE (W1) ; \ LAGRAPH_FREE (P) ; \ LAGRAPH_FREE (D) ; #if 0 // easy mode: LAGr_info LAGraph_tricount ( uint64_t *ntriangles, // # of triangles LAGr_Graph G, // a graph LAGr_descriptor d ) ; LAGr_info LAGraph_tricount ( uint64_t *ntriangles, // # of triangles bool directed, LAGr_Matrix A // adj matrix of an directed graph ) ; #endif GrB_Info LAGraph_tricount // count # of triangles ( int64_t *ntri, // # of triangles const int method, // 1 to 6, see above int sorting, // 0: no 
sort // 1: sort by degree, ascending order // -1: sort by degree, descending order // 2: auto selection: no sort if rule is not // triggered. Otherise: sort in ascending order // for methods 3 and 5, descending ordering for // methods 4 and 6. const int64_t *degree, // degree of each node, may be NULL if sorting==0. // of size n, unmodified. const GrB_Matrix A_in // input matrix, must be symmetric, no diag entries ) { //-------------------------------------------------------------------------- // check inputs and initialize //-------------------------------------------------------------------------- GrB_Info info ; GrB_Index n ; GrB_Matrix C = NULL, L = NULL, U = NULL, T = NULL, A = NULL ; int64_t *P = NULL, *D = NULL, *W0 = NULL, *W1 = NULL ; LAGr_Matrix_nrows (&n, A_in) ; #if defined ( GxB_SUITESPARSE_GRAPHBLAS ) \ && ( GxB_IMPLEMENTATION >= GxB_VERSION (3,2,0) ) // the PAIR function is f(x,y)=1, ignoring input values and type GrB_Descriptor desc_s = GrB_DESC_S ; GrB_Descriptor desc_st1 = GrB_DESC_ST1 ; GrB_Semiring semiring = GxB_PLUS_PAIR_INT64 ; // GrB_Semiring semiring = GxB_PLUS_PAIR_INT32 ; #else // f(x,y)=x*y, so x and y must be 1 to compute the correct count, and // thus the input matrix A must be binary. GrB_Descriptor desc_s = NULL ; GrB_Descriptor desc_st1 = LAGraph_desc_otoo ; GrB_Semiring semiring = LAGraph_PLUS_TIMES_INT64 ; #endif GrB_Monoid sum = LAGraph_PLUS_INT64_MONOID ; LAGr_Matrix_new (&C, GrB_INT64, n, n) ; // LAGr_Matrix_new (&C, GrB_INT32, n, n) ; //-------------------------------------------------------------------------- // heuristic sort rule //-------------------------------------------------------------------------- if (sorting == 2) { // auto selection of sorting method sorting = 0 ; // default is not to sort if (method >= 3 && method <= 6) { // This rule is very similar to Scott Beamer's rule in the GAP TC // benchmark, except that it is extended to handle the ascending // sort needed by methods 3 and 5. 
It also uses a stricter rule, // since the performance of triangle counting in GraphBLAS is less // sensitive to the sorting as compared to the GAP algorithm. This // is because the dot products in GraphBLAS use binary search if // one vector is very sparse compared to the other. As a result, // GraphBLAS needs the sort for fewer matrices, as compared to the // GAP algorithm. // With this rule, the GAP-kron and GAP-twitter matrices are // sorted, and the others remain unsorted. With the rule in the // GAP tc.cc benchmark, GAP-web is also sorted, but it is not // sorted here. #define NSAMPLES 1000 GrB_Index nvals ; LAGr_Matrix_nvals (&nvals, A_in) ; if (n > NSAMPLES && ((double) nvals / ((double) n)) >= 10) { // pick 1000 nodes at random and determine their degree // struct drand48_data buffer ; // srand48_r ((long int) n, &buffer) ; uint64_t seed = n ; int64_t samples [NSAMPLES] ; int64_t dsum = 0 ; for (int k = 0 ; k < NSAMPLES ; k++) { uint64_t result = LAGraph_rand64 (&seed) ; // lrand48_r (&buffer, &result) ; int64_t i = result % n ; int64_t d = degree [i] ; samples [k] = d ; dsum += d ; } // find the average degree double sample_average = ((double) dsum) / NSAMPLES ; // find the median degree GB_qsort_1a (samples, NSAMPLES) ; double sample_median = (double) samples [NSAMPLES/2] ; printf ("average degree: %g\n", sample_average) ; printf ("median degree: %g\n", sample_median) ; // sort if the average degree is very high compared to the // median if (sample_average > 4 * sample_median) { switch (method) { case 3: sorting = 1 ; break ; // sort ascending case 4: sorting = -1 ; break ; // sort descending case 5: sorting = 1 ; break ; // sort ascending case 6: sorting = -1 ; break ; // sort descending default: sorting = 0 ; break ; // no sort } } } } printf ("auto sorting: %d: ", sorting) ; if (sorting == 0) printf ("none") ; else if (sorting == -1) printf ("descending") ; else if (sorting == 1) printf ("ascending") ; printf ("\n") ; } 
//-------------------------------------------------------------------------- // sort the input matrix, if requested //-------------------------------------------------------------------------- if (sorting != 0) { // decide how many threads to use #define CHUNK (64*1024) int nthreads = LAGraph_get_nthreads ( ) ; nthreads = LAGRAPH_MIN (nthreads, n/CHUNK) ; nthreads = LAGRAPH_MAX (nthreads, 1) ; // allocate workspace P = LAGraph_malloc (n, sizeof (int64_t)) ; D = LAGraph_malloc (n, sizeof (int64_t)) ; W0 = LAGraph_malloc (n, sizeof (int64_t)) ; W1 = LAGraph_malloc (n, sizeof (int64_t)) ; if (P == NULL || D == NULL || W0 == NULL || W1 == NULL) { // out of memory LAGRAPH_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } // construct the pair [D,P] to sort if (sorting > 0) { printf ("sort ascending\n") ; // sort [D,P] in ascending order of degree, tie-breaking on P #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t k = 0 ; k < n ; k++) { D [k] = degree [k] ; P [k] = k ; } } else { printf ("sort descending\n") ; // sort [D,P] in descending order of degree, tie-breaking on P #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t k = 0 ; k < n ; k++) { D [k] = -degree [k] ; P [k] = k ; } } // for (int64_t k = 0 ; k < n ; k++) // { // printf ("before [%3ld %3ld]\n", D [k], P [k]) ; // } GB_msort_2 (D, P, W0, W1, n, nthreads) ; // printf ("\n") ; // for (int64_t k = 0 ; k < n ; k++) // { // printf ("after [%3ld %3ld]\n", D [k], P [k]) ; // } // T = A_in (P,P) and typecast to boolean LAGr_Matrix_new (&T, GrB_BOOL, n, n) ; LAGr_extract (T, NULL, NULL, A_in, P, n, P, n, NULL) ; A = T ; } else { // use the input matrix as-is A = A_in ; } #if 0 printf ("permuted:\n") ; GrB_Index ignore ; GrB_Matrix_nvals (&ignore, A) ; GxB_print (A, 3) ; // compute the degree of each node (TODO: make this an LAGraph utility) GrB_Vector X, D2 ; LAGr_Vector_new (&X, GrB_BOOL, n) ; LAGr_Vector_new (&D2, GrB_INT64, n) ; LAGr_assign (X, NULL, NULL, 0, GrB_ALL, 
n, NULL) ; LAGr_assign (D2, NULL, NULL, 0, GrB_ALL, n, NULL) ; LAGr_vxm (D2, NULL, GrB_PLUS_INT64, GxB_PLUS_PAIR_INT64, X, A, NULL) ; GxB_print (D2, 3) ; GrB_free (&X) ; GrB_Type type ; GrB_Index n2, nvals2, *Di ; int64_t *deg ; LAGr_Vector_export (&D2, &type, &n2, &nvals2, &Di, (void **) &deg, NULL) ; if (n != n2 || n != nvals2) { printf ("??\n") ; abort ( ) ; } printf ("\nNew: sorting %d\n", sorting) ; for (int i = 0 ; i < 67 ; i++) { printf ("node: %d degree %ld\n", i, deg [i]) ; } #endif // free workspace LAGRAPH_FREE (W0) ; LAGRAPH_FREE (W1) ; LAGRAPH_FREE (D) ; LAGRAPH_FREE (P) ; //-------------------------------------------------------------------------- // count triangles //-------------------------------------------------------------------------- switch (method) { #if 0 // case 0: // minitri: ntri = nnz (A*E == 2) / 3 // This method requires the incidence matrix E. It is very slow // compared to the other methods. The construction of E was done // in the Test/Tricount/*.c driver, and it hasn't been added here. 
LAGr_Matrix_ncols (&ne, E) ; LAGr_free (&C) ; LAGr_Matrix_new (&C, GrB_INT64, n, ne) ; LAGr_mxm (C, NULL, NULL, semiring, A, E, NULL) ; LAGr_Matrix_new (&S, GrB_BOOL, n, ne) ; LAGr_apply (S, NULL, NULL, LAGraph_ISTWO_INT64, C, NULL) ; LAGr_reduce (ntri, NULL, sum, S, NULL) ; (*ntri) /= 3 ; break ; #endif case 1: // Burkhardt: ntri = sum (sum ((A^2) .* A)) / 6 LAGr_mxm (C, A, NULL, semiring, A, A, desc_s) ; LAGr_reduce (ntri, NULL, sum, C, NULL) ; (*ntri) /= 6 ; break ; case 2: // Cohen: ntri = sum (sum ((L * U) .* A)) / 2 LAGRAPH_OK (tricount_prep (&L, &U, A)) ; LAGr_mxm (C, A, NULL, semiring, L, U, desc_s) ; LAGr_reduce (ntri, NULL, sum, C, NULL) ; (*ntri) /= 2 ; break ; case 3: // Sandia: ntri = sum (sum ((L * L) .* L)) // using the masked saxpy3 method LAGRAPH_OK (tricount_prep (&L, NULL, A)) ; LAGr_mxm (C, L, NULL, semiring, L, L, desc_s) ; LAGr_reduce (ntri, NULL, sum, C, NULL) ; break ; case 4: // Sandia2: ntri = sum (sum ((U * U) .* U)) // using the masked saxpy3 method LAGRAPH_OK (tricount_prep (NULL, &U, A)) ; LAGr_mxm (C, U, NULL, semiring, U, U, desc_s) ; LAGr_reduce (ntri, NULL, sum, C, NULL) ; break ; case 5: // SandiaDot: ntri = sum (sum ((L * U') .* L)) // This tends to be the fastest method, for most matrices, but the // Dot2 method is also very fast. // using the masked dot product LAGRAPH_OK (tricount_prep (&L, &U, A)) ; LAGr_mxm (C, L, NULL, semiring, L, U, desc_st1) ; LAGr_reduce (ntri, NULL, sum, C, NULL) ; break ; case 6: // SandiaDot2: ntri = sum (sum ((U * L') .* U)) // using the masked dot product LAGRAPH_OK (tricount_prep (&L, &U, A)) ; LAGr_mxm (C, U, NULL, semiring, U, L, desc_st1) ; LAGr_reduce (ntri, NULL, sum, C, NULL) ; break ; default: // invalid method LAGRAPH_FREE_ALL ; return (GrB_INVALID_VALUE) ; break ; } //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- LAGRAPH_FREE_ALL ; return (GrB_SUCCESS) ; }
log_multiplier_kernel.c
#include <Python.h>
#include <stdio.h>
#include "numpy/arrayobject.h"
#include "numpy/npy_math.h"
#include <math.h>
#include <omp.h>

/* Number of OpenMP threads the kernel always launches.
   NOTE(review): hard-coded; consider omp_get_max_threads() — confirm the
   deployment target really has 20 cores. */
#define NUM_THREADS 20

struct module_state {
    PyObject *error;
};

/* Global parameters of the approximate (log-domain) arithmetic:
   N            : saturation bound for the magnitude difference,
   granularity  : scale factor turning the difference into a table index,
   delP / delM  : correction tables (log-sum / log-difference deltas),
   delP_ref/... : owned references that keep the numpy arrays backing
                  delP/delM alive (see py_init_error_obj). */
typedef struct {
    int N;
    long granularity;
    double *delP;
    double *delM;
    PyObject *delP_ref;
    PyObject *delM_ref;
} appr_arithmetic;

static appr_arithmetic obj;

#if PY_MAJOR_VERSION >= 3
#define GETSTATE(m) ((struct module_state*)PyModule_GetState(m))
#else
#define GETSTATE(m) (&_state)
static struct module_state _state;
#endif

/*
 * Multiply two (2, m, p) x (2, p, n) stacks where plane 0 holds a sign/flag
 * value and plane 1 holds a log-domain magnitude; the product is accumulated
 * with a max-plus recurrence corrected through obj.delP/obj.delM.
 * (NOTE(review): this reads like a logarithmic-number-system multiply —
 * confirm with the Python caller.)
 *
 * Returns a malloc'd (2, m, n) buffer the caller must free, or NULL on
 * allocation failure.
 *
 * Parallelization: the flattened (i, j) output cells are block-partitioned
 * across NUM_THREADS; start/end are multiples of _p, so the whole k-run of
 * one output cell stays on a single thread and result[] is written without
 * races.
 */
double* _fast_multiplier(double *_d1, double *_d2, int _m, int _n, int _p)
{
    double *result;
    int _off1, _off2, _off3, t_ovf, chunk;
    int alloc_failed = 0;              /* set by any thread whose temp malloc fails */

    result = (double *) malloc(sizeof(double) * 2 * _m * _n);
    if (result == NULL)
        return NULL;                   /* caller reports MemoryError */

    _off1 = _m * _p;                   /* plane stride of input 1 */
    _off2 = _p * _n;                   /* plane stride of input 2 */
    _off3 = _m * _n;                   /* plane stride of the result */
    t_ovf = _off3 % NUM_THREADS;       /* cells that don't divide evenly */
    chunk = _off3 / NUM_THREADS;       /* output cells per thread */

#pragma omp parallel num_threads (NUM_THREADS)\
    shared (_d1, _d2, _m, _n, _p, result, _off1, _off2, _off3, obj, t_ovf, chunk, alloc_failed)\
    default (none)
    {
        /* Per-thread scratch: temp[0.._p) holds the sign products for the
           current cell, temp[_p..2_p) the summed magnitudes. */
        double *temp = (double *) malloc(sizeof(double) * 2 * _p);
        if (temp == NULL) {
#pragma omp atomic write
            alloc_failed = 1;
        } else {
            double diff, lv_max, lv_min;
            int i, j, k, t, index, start, end;
            int t_num = omp_get_thread_num();

            /* Flattened index range [start, end) in units of k-steps; the
               first t_ovf threads take one extra cell (chunk+1). */
            start = t_num * chunk * _p;
            start = ((t_num < t_ovf) && (t_ovf != 0)) ?
                    start + (t_num * _p) : start + (t_ovf * _p);
            end = start + (chunk * _p);
            end = ((t_num < t_ovf) && (t_ovf != 0)) ? end + _p : end;

            for (index = start; index < end; index++) {
                i = index / _off2;         /* output row   */
                t = index % _off2;
                j = t / _p;                /* output col   */
                k = t % _p;                /* reduction idx */

                /* Sign product (equality of flags) and log-magnitude sum. */
                temp[k] = (_d1[(i * _p) + k] == _d2[(k * _n) + j]);
                temp[k + _p] = _d1[(i * _p) + k + _off1] + _d2[(k * _n) + j + _off2];

                if (k == 0) {
                    /* First term initializes the accumulator cell. */
                    result[(i * _n) + j] = temp[0];
                    result[(i * _n) + j + _off3] = temp[_p];
                } else {
                    lv_max = (result[(i * _n) + j + _off3] > temp[k + _p]) ?
                             result[(i * _n) + j + _off3] : temp[k + _p];
                    lv_min = (result[(i * _n) + j + _off3] <= temp[k + _p]) ?
                             result[(i * _n) + j + _off3] : temp[k + _p];
                    diff = lv_max - lv_min;
                    /* Saturate (NaN or out-of-range) at obj.N, then scale to
                       a correction-table index. */
                    diff = ((npy_isnan(diff)) || (diff > obj.N)) ? obj.N : diff;
                    diff *= obj.granularity;
                    if (temp[k] == result[(i * _n) + j])
                        /* Same sign: add the "plus" correction. */
                        result[(i * _n) + j + _off3] = lv_max + obj.delP[lround(diff)];
                    else {
                        /* Opposite sign: the larger magnitude wins the sign,
                           apply the "minus" correction. */
                        result[(i * _n) + j] =
                            (result[(i * _n) + j + _off3] > temp[k + _p]) ?
                            result[(i * _n) + j] : temp[k];
                        result[(i * _n) + j + _off3] = lv_max + obj.delM[lround(diff)];
                    }
                }
            }
            free(temp);
        }
        /* Implicit barrier at the end of the parallel region; the explicit
           "#pragma omp barrier" that used to sit here was redundant. */
    }

    if (alloc_failed) {
        free(result);
        return NULL;
    }
    return result;
}

/* Capsule destructor: frees the kernel's output buffer once the numpy array
   that wraps it is garbage-collected. */
void capsule_cleanup(PyObject *capsule)
{
    void *memory = PyCapsule_GetPointer(capsule, NULL);
    free(memory);
}

/* fast_multiplier(a, b) -> ndarray of shape (2, m, n).
   NOTE(review): no validation that a and b are C-contiguous 3-D NPY_DOUBLE
   arrays with matching inner dimension — malformed input reads out of
   bounds; confirm the Python caller guarantees this. */
static PyObject *py_fast_multiplier(PyObject *self, PyObject *args)
{
    PyArrayObject *float_list1, *float_list2;
    PyObject *result, *capsule;
    int m, n, p;
    npy_intp dim[3];
    double *d1, *d2, *d3;

    if (!PyArg_ParseTuple(args, "OO", &float_list1, &float_list2))
        return NULL;

    /* Use the accessor macros instead of the deprecated direct struct
       access (->data / ->dimensions). */
    d1 = (double *) PyArray_DATA(float_list1);
    d2 = (double *) PyArray_DATA(float_list2);
    m = (int) PyArray_DIM(float_list1, 1);
    n = (int) PyArray_DIM(float_list2, 2);
    p = (int) PyArray_DIM(float_list1, 2);

    dim[0] = 2;
    dim[1] = m;
    dim[2] = n;

    d3 = _fast_multiplier(d1, d2, m, n, p);
    if (d3 == NULL)
        return PyErr_NoMemory();

    result = PyArray_SimpleNewFromData(3, dim, NPY_DOUBLE, (void *) d3);
    if (result == NULL) {
        free(d3);                       /* previous code leaked d3 here */
        return NULL;
    }

    /* Hand buffer ownership to the array via a capsule so it is freed when
       the array dies. */
    capsule = PyCapsule_New(d3, NULL, capsule_cleanup);
    if (capsule == NULL) {
        Py_DECREF(result);              /* d3 still owned by nobody: freed by */
        free(d3);                       /* us, result has no base yet         */
        return NULL;
    }
    if (PyArray_SetBaseObject((PyArrayObject *) result, capsule) < 0) {
        /* SetBaseObject steals the capsule reference even on failure, and
           the capsule destructor will free d3. */
        Py_DECREF(result);
        return NULL;
    }
    return result;
}

/* init_error_obj(N, granularity, delP, delM) -> 1
   Installs the global approximate-arithmetic parameters. */
static PyObject *py_init_error_obj(PyObject *self, PyObject *args)
{
    PyArrayObject *float_list1, *float_list2;

    if (!PyArg_ParseTuple(args, "ilOO", &obj.N, &obj.granularity,
                          &float_list1, &float_list2))
        return NULL;

    /* Hold owned references so the correction tables cannot be collected
       while the kernel still points into their data (the previous code kept
       borrowed pointers only — a use-after-free hazard). */
    Py_INCREF((PyObject *) float_list1);
    Py_INCREF((PyObject *) float_list2);
    Py_XDECREF(obj.delP_ref);
    Py_XDECREF(obj.delM_ref);
    obj.delP_ref = (PyObject *) float_list1;
    obj.delM_ref = (PyObject *) float_list2;
    obj.delP = (double *) PyArray_DATA(float_list1);
    obj.delM = (double *) PyArray_DATA(float_list2);

    return Py_BuildValue("i", 1);
}

/* Both handlers are plain (self, args) PyCFunctions, so they must be
   registered as METH_VARARGS only.  The previous METH_VARARGS|METH_KEYWORDS
   flags promised a third kwargs parameter the functions do not accept —
   calling with keywords went through a mismatched function pointer. */
static PyMethodDef symtab[] = {
    {"fast_multiplier", (PyCFunction)py_fast_multiplier, METH_VARARGS,
     "fast_multiplier(a, b) -> (2, m, n) float64 array"},
    {"init_error_obj", (PyCFunction)py_init_error_obj, METH_VARARGS,
     "init_error_obj(N, granularity, delP, delM) -> 1"},
    {NULL, NULL} /* sentinel */
};

/* GC traversal hook for the per-module state. */
static int lst_traverse(PyObject *m, visitproc visit, void *arg)
{
    Py_VISIT(GETSTATE(m)->error);
    return 0;
}

/* GC clear hook for the per-module state. */
static int lst_clear(PyObject *m)
{
    Py_CLEAR(GETSTATE(m)->error);
    return 0;
}

static struct PyModuleDef moduledef = {
    PyModuleDef_HEAD_INIT,
    "native_matr_mult_wrapper",
    NULL,
    sizeof(struct module_state),
    symtab,
    NULL,
    lst_traverse,
    lst_clear,
    NULL
};

PyObject* PyInit_native_matr_mult_wrapper(void)
{
    /* Initialize the numpy C API first; on failure this macro sets an
       ImportError and returns NULL from this function. */
    import_array();

    /* Create the module and add the functions */
    PyObject* module = PyModule_Create(&moduledef);
    if (module == NULL)
        return NULL;
    struct module_state *st = GETSTATE(module);

    /* Add some symbolic constants to the module */
    st->error = PyErr_NewException("Dummy.Error", NULL, NULL);
    if (st->error == NULL) {
        Py_DECREF(module);
        return NULL;
    }
    return module;
}
GB_binop__max_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__max_fp64) // A.*B function (eWiseMult): GB (_AemultB_08__max_fp64) // A.*B function (eWiseMult): GB (_AemultB_02__max_fp64) // A.*B function (eWiseMult): GB (_AemultB_04__max_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__max_fp64) // A*D function (colscale): GB (_AxD__max_fp64) // D*A function (rowscale): GB (_DxB__max_fp64) // C+=B function (dense accum): GB (_Cdense_accumB__max_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__max_fp64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_fp64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_fp64) // C=scalar+B GB (_bind1st__max_fp64) // C=scalar+B' GB (_bind1st_tran__max_fp64) // C=A+scalar GB (_bind2nd__max_fp64) // C=A'+scalar GB (_bind2nd_tran__max_fp64) // C type: double // A type: double // A pattern? 0 // B type: double // B pattern? 
0 // BinaryOp: cij = fmax (aij, bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ double aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ double bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = fmax (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MAX || GxB_NO_FP64 || GxB_NO_MAX_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__max_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__max_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__max_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__max_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__max_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double 
*restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__max_fp64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__max_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; double alpha_scalar ; double beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((double *) alpha_scalar_in)) ; beta_scalar = (*((double *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__max_fp64) ( GrB_Matrix C, const int C_sparsity, const int 
ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__max_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__max_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__max_fp64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__max_fp64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 
; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; double bij = GBX (Bx, p, false) ; Cx [p] = fmax (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__max_fp64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = GBX (Ax, p, false) ; Cx [p] = fmax (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = fmax (x, aij) ; \ } GrB_Info GB (_bind1st_tran__max_fp64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = fmax (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__max_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_binop__div_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__div_uint32 // A.*B function (eWiseMult): GB_AemultB__div_uint32 // A*D function (colscale): GB_AxD__div_uint32 // D*A function (rowscale): GB_DxB__div_uint32 // C+=B function (dense accum): GB_Cdense_accumB__div_uint32 // C+=b function (dense accum): GB_Cdense_accumb__div_uint32 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__div_uint32 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__div_uint32 // C=scalar+B GB_bind1st__div_uint32 // C=scalar+B' GB_bind1st_tran__div_uint32 // C=A+scalar GB_bind2nd__div_uint32 // C=A'+scalar GB_bind2nd_tran__div_uint32 // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 32) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C 
#define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_IDIV_UNSIGNED (x, y, 32) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_DIV || GxB_NO_UINT32 || GxB_NO_DIV_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__div_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__div_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__div_uint32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__div_uint32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__div_uint32 ( GrB_Matrix C, 
const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__div_uint32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__div_uint32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, 
// NOTE: this chunk begins mid-file; the lines below are the tail of
// GB_AaddB__div_uint32, whose head precedes this chunk.
*kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

// Element-wise multiply (set intersection) specialized for the DIV_UINT32
// operator.  The numeric work is done by the included template, which is
// specialized by the GB_* macros defined earlier in this auto-generated file.
GrB_Info GB_AemultB__div_uint32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-task slice workspace pointers; filled in by the template if needed
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = x / Bx [p], using the unsigned integer division GB_IDIV_UNSIGNED
// (32-bit).  Bb is the bitmap of B, or NULL if B has no bitmap.
GrB_Info GB_bind1st__div_uint32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap (if any)
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = Bx [p] ;
        Cx [p] = GB_IDIV_UNSIGNED (x, bij, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = Ax [p] / y, using the unsigned integer division GB_IDIV_UNSIGNED
// (32-bit).  Ab is the bitmap of A, or NULL if A has no bitmap.
GrB_Info GB_bind2nd__div_uint32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap (if any)
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = Ax [p] ;
        Cx [p] = GB_IDIV_UNSIGNED (aij, y, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint32_t aij = Ax [pA] ;                    \
    Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 32) ;   \
}

// C = x / A', where the transpose is done by the included template and the
// per-entry work is GB_CAST_OP above.
GrB_Info GB_bind1st_tran__div_uint32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for any code that follows in this file
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint32_t aij = Ax [pA] ;                    \
    Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 32) ;   \
}

// C = A' / y, where the transpose is done by the included template and the
// per-entry work is GB_CAST_OP above.
GrB_Info GB_bind2nd_tran__div_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

// closes the #ifndef GBCOMPACT guard at the top of this generated file
#endif
// ===== for_simd_misc_messages.c =====
// NOTE(review): clang -verify regression test for '#pragma omp for simd'
// diagnostics.  The expected-error/expected-note annotations use line-relative
// offsets (@+N), so code and comment placement below is part of the test
// contract and must not be reordered.
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify=expected,omp50 %s -Wuninitialized

// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify=expected,omp50 -verify %s -Wuninitialized

void xxx(int argc) {
  int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp for simd
  for (int i = 0; i < 10; ++i)
    argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp for simd'}}
#pragma omp for simd

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp for simd'}}
#pragma omp for simd foo

void test_no_clause() {
  int i;
#pragma omp for simd
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+2 {{statement after '#pragma omp for simd' must be a for loop}}
#pragma omp for simd
  ++i;
}

void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;

  int x[24];

#pragma omp parallel
#pragma omp for simd
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }

  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}

void test_invalid_clause() {
  int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd foo bar
  for (i = 0; i < 16; ++i)
    ;
}

void test_non_identifiers() {
  int i, x;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd;
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd linear(x);
  for (i = 0; i < 16; ++i)
    ;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd private(x);
  for (i = 0; i < 16; ++i)
    ;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd, private(x);
  for (i = 0; i < 16; ++i)
    ;
}

extern int foo();

// Malformed and out-of-range arguments to the 'safelen' clause.
void test_safelen() {
  int i;
// expected-error@+1 {{expected '('}}
#pragma omp for simd safelen
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp for simd safelen()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp for simd safelen 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4, , 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4, 8)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{integer constant expression}}
#pragma omp for simd safelen(2.5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{integer constant expression}}
#pragma omp for simd safelen(foo())
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp for simd safelen(-5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp for simd safelen(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp for simd safelen(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}

// Malformed and out-of-range arguments to the 'simdlen' clause (mirrors
// test_safelen).
void test_simdlen() {
  int i;
// expected-error@+1 {{expected '('}}
#pragma omp for simd simdlen
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp for simd simdlen()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp for simd simdlen 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd simdlen(4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4, , 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd simdlen(4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4, 8)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{integer constant expression}}
#pragma omp for simd simdlen(2.5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{integer constant expression}}
#pragma omp for simd simdlen(foo())
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp for simd simdlen(-5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp for simd simdlen(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp for simd simdlen(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}

// simdlen must not exceed safelen, in either clause order.
void test_safelen_simdlen() {
  int i;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp for simd simdlen(6) safelen(5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp for simd safelen(5) simdlen(6)
  for (i = 0; i < 16; ++i)
    ;
}

// Malformed 'collapse' arguments, and loop-nest depth checking.
void test_collapse() {
  int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp for simd collapse
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd collapse(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd collapse()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd collapse(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd collapse(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp for simd collapse 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
#pragma omp for simd collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{integer constant expression}}
#pragma omp for simd collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{integer constant expression}}
#pragma omp for simd collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for simd collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for simd collapse(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for simd collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd collapse(2)
  for (i = 0; i < 16; ++i) // expected-note {{defined as lastprivate}}
    // expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for simd' directive into a parallel or another task region?}}
    for (int j = 0; j < 16; ++j)
// expected-error@+2 2 {{reduction variable must be shared}}
// expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp for simd reduction(+ : i, j)
      for (int k = 0; k < 16; ++k)
        i += j;
}

// Malformed 'linear' clauses and conflicts with private/lastprivate.
void test_linear() {
  int i;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp for simd linear(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp for simd linear()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp for simd linear(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp for simd linear(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp for simd linear(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp for simd linear(x, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp for simd linear(x, y, z)
  for (i = 0; i < 16; ++i)
    ;

  int x, y;
// expected-error@+1 {{expected expression}}
#pragma omp for simd linear(x :)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(x :, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd linear(x : 1)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd linear(x : 2 * 2)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(x : 1, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(x : 1, y, z : 1)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be linear}}
#pragma omp for simd linear(x) linear(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+2 {{defined as private}}
// expected-error@+1 {{private variable cannot be linear}}
#pragma omp for simd private(x) linear(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be private}}
#pragma omp for simd linear(x) private(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}}
#pragma omp for simd linear(x, y : 0)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be lastprivate}}
#pragma omp for simd linear(x) lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-note@+2 {{defined as lastprivate}}
// expected-error@+1 {{lastprivate variable cannot be linear}}
#pragma omp for simd lastprivate(x) linear(x)
  for (i = 0; i < 16; ++i)
    ;
}

// Malformed 'aligned' clauses; aligned requires array or pointer arguments.
void test_aligned() {
  int i;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp for simd aligned(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp for simd aligned()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp for simd aligned(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp for simd aligned(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp for simd aligned(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp for simd aligned(x, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp for simd aligned(x, y, z)
  for (i = 0; i < 16; ++i)
    ;

  int *x, y, z[25]; // expected-note 4 {{'y' defined here}}
#pragma omp for simd aligned(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd aligned(z)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp for simd aligned(x :)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(x :, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd aligned(x : 1)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd aligned(x : 2 * 2)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(x : 1, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(x : 1, y, z : 1)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp for simd aligned(x, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp for simd aligned(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+2 {{defined as aligned}}
// expected-error@+1 {{a variable cannot appear in more than one aligned clause}}
#pragma omp for simd aligned(x) aligned(z, x)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+3 {{defined as aligned}}
// expected-error@+2 {{a variable cannot appear in more than one aligned clause}}
// expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp for simd aligned(x, y, z) aligned(y, z)
  for (i = 0; i < 16; ++i)
    ;
}

// Malformed and well-formed 'private' clauses.
void test_private() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd private(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd private(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd private(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd private()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd private(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for simd private(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel
#pragma omp for simd private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}

// Malformed and well-formed 'lastprivate' clauses.
void test_lastprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp for simd lastprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd lastprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for simd lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel
#pragma omp for simd lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}

// Malformed 'firstprivate' clauses, and combining with lastprivate.
void test_firstprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp for simd firstprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd firstprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for simd firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel
#pragma omp for simd lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp for simd lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}

// Loop variables must be of integer or pointer type.
void test_loop_messages() {
  float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp for simd
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp for simd
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
}

// 'nontemporal' and 'order' are OpenMP 5.0 clauses: rejected under -fopenmp-version=45
// (omp45-error), and argument errors checked under 5.0 (omp50-error).
void test_nontemporal() {
  int i;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd nontemporal(
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd nontemporal(,
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 2 {{expected expression}}
#pragma omp for simd nontemporal(, )
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{expected expression}}
#pragma omp for simd nontemporal()
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{expected expression}}
#pragma omp for simd nontemporal(int)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} omp50-error@+1 {{expected variable name}}
#pragma omp for simd nontemporal(0)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp for simd nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp for simd nontemporal(x, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp for simd nontemporal(x, y, z)
  for (i = 0; i < 16; ++i)
    ;

  int x, y;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd nontemporal(x :)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}}
#pragma omp for simd nontemporal(x :, )
  for (i = 0; i < 16; ++i)
    ;
// omp50-note@+2 {{defined as nontemporal}}
// omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}}
#pragma omp for simd nontemporal(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}}
#pragma omp for simd private(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}}
#pragma omp for simd nontemporal(x) private(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}}
#pragma omp for simd nontemporal(x, y : 0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}}
#pragma omp for simd nontemporal(x) lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp for simd'}}
#pragma omp for simd lastprivate(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp for simd order // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for simd'}} expected-error {{expected '(' after 'order'}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp for simd order( // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp for simd order(none // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp for simd order(concurrent // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp for simd order(concurrent) // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp for simd'}}
  for (int i = 0; i < 10; ++i)
    ;
}
// ===== GB_binop__minus_fp32.c =====
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__minus_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_01__minus_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_02__minus_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_03__minus_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__minus_fp32)
// A*D function (colscale):         GB (_AxD__minus_fp32)
// D*A function (rowscale):         GB (_DxB__minus_fp32)
// C+=B function (dense accum):     GB (_Cdense_accumB__minus_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__minus_fp32)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__minus_fp32)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__minus_fp32)
// C=scalar+B                       GB (_bind1st__minus_fp32)
// C=scalar+B'                      GB (_bind1st_tran__minus_fp32)
// C=A+scalar                       GB (_bind2nd__minus_fp32)
// C=A'+scalar                      GB (_bind2nd_tran__minus_fp32)

// C type:   float
// A type:   float
// B,b type: float
// BinaryOp: cij = (aij - bij)

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    float aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    float bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x - y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINUS || GxB_NO_FP32 || GxB_NO_MINUS_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__minus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__minus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__minus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__minus_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above always returns (generated-code artifact)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__minus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__minus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Element-wise add (set union); the workspaces declared here are allocated by
// the included template if needed and released by GB_FREE_WORK.
GrB_Info GB (_AaddB__minus_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

// NOTE: this definition continues past the end of this chunk; the remainder
// of its parameter list and body follow in the next part of the file.
GrB_Info GB (_AemultB_01__minus_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t
*restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__minus_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__minus_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__minus_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__minus_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 
; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = (x - bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__minus_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = (aij - y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x - aij) ; \ } GrB_Info GB (_bind1st_tran__minus_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - y) ; \ } GrB_Info GB (_bind2nd_tran__minus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
explicit_solver_strategy.h
// // Authors: // Miguel Angel Celigueta maceli@cimne.upc.edu // Miquel Santasusana msantasusana@cimne.upc.edu // #if !defined(KRATOS_EXPLICIT_SOLVER_STRATEGY) #define KRATOS_EXPLICIT_SOLVER_STRATEGY // Project includes #include "utilities/timer.h" #include "custom_elements/Particle_Contact_Element.h" #include "includes/variables.h" #include "includes/deprecated_variables.h" /* System includes */ #include <limits> #include <iostream> #include <iomanip> #include <time.h> /* External includes */ #ifdef _OPENMP #include <omp.h> #endif #define CUSTOMTIMER 0 // ACTIVATES AND DISABLES ::TIMER::::: #include "includes/define.h" #include "utilities/openmp_utils.h" #include "includes/model_part.h" #include "solving_strategies/strategies/implicit_solving_strategy.h" #include "solving_strategies/schemes/scheme.h" #include "custom_strategies/schemes/dem_integration_scheme.h" #include "custom_utilities/create_and_destroy.h" #include "custom_utilities/dem_fem_utilities.h" #include "custom_utilities/GeometryFunctions.h" #include "custom_utilities/inlet.h" #include "custom_elements/cluster3D.h" #include "custom_elements/rigid_body_element.h" ////Cfeng #include "custom_utilities/dem_fem_search.h" #include "custom_utilities/discrete_particle_configure.h" #include "custom_utilities/rigid_face_geometrical_object_configure.h" #ifdef USING_CGAL #include <CGAL/spatial_sort.h> #endif /* Timer defines */ #ifdef CUSTOMTIMER #define KRATOS_TIMER_START(t) Timer::Start(t); #define KRATOS_TIMER_STOP(t) Timer::Stop(t); #else #define KRATOS_TIMER_START(t) #define KRATOS_TIMER_STOP(t) #endif namespace Kratos { class ExplicitSolverSettings { public: KRATOS_CLASS_POINTER_DEFINITION(ExplicitSolverSettings); ExplicitSolverSettings() { } ~ExplicitSolverSettings() { } ModelPart* r_model_part; ModelPart* contact_model_part; ModelPart* fem_model_part; ModelPart* cluster_model_part; ModelPart* inlet_model_part; }; class KRATOS_API(DEM_APPLICATION) ExplicitSolverStrategy { public: typedef 
ModelPart::NodesContainerType NodesArrayType; typedef ModelPart::ElementsContainerType ElementsArrayType; typedef ElementsArrayType::iterator ElementsIterator; typedef ModelPart::ConditionsContainerType ConditionsArrayType; typedef ModelPart::NodesContainerType::ContainerType NodesContainerType; typedef ModelPart::ElementsContainerType::ContainerType ElementsContainerType; typedef ModelPart::ConditionsContainerType::ContainerType ConditionsContainerType; typedef SpatialSearch::ResultElementsContainerType ResultElementsContainerType; typedef SpatialSearch::VectorResultElementsContainerType VectorResultElementsContainerType; typedef SpatialSearch::RadiusArrayType RadiusArrayType; typedef SpatialSearch::DistanceType DistanceType; typedef SpatialSearch::VectorDistanceType VectorDistanceType; typedef SpatialSearch::ResultConditionsContainerType ResultConditionsContainerType; typedef SpatialSearch::VectorResultConditionsContainerType VectorResultConditionsContainerType; typedef PointerVectorSet<Properties, IndexedObject> PropertiesContainerType; typedef PropertiesContainerType::iterator PropertiesIterator; typedef DiscreteParticleConfigure<3> ElementConfigureType; typedef RigidFaceGeometricalObjectConfigure<3> RigidFaceGeometricalConfigureType; typedef Variable<double> ComponentOf3ComponentsVariableType; /// Pointer definition of ExplicitSolverStrategy KRATOS_CLASS_POINTER_DEFINITION(ExplicitSolverStrategy); ExplicitSolverStrategy() { } ExplicitSolverStrategy(ExplicitSolverSettings& settings, const double max_delta_time, const int n_step_search, const double safety_factor, const int delta_option, ParticleCreatorDestructor::Pointer p_creator_destructor, DEM_FEM_Search::Pointer p_dem_fem_search, SpatialSearch::Pointer pSpSearch, Parameters strategy_parameters) { mParameters = strategy_parameters; mDeltaOption = delta_option; mpParticleCreatorDestructor = p_creator_destructor; mpDemFemSearch = p_dem_fem_search; mpSpSearch = pSpSearch; //Also checks old flag name for 
backward compatibility issues. if(mParameters["do_search_dem_neighbours"].GetBool()) { mDoSearchNeighbourElements = true; } else mDoSearchNeighbourElements = false; p_creator_destructor->SetDoSearchNeighbourElements(mDoSearchNeighbourElements); if(mParameters["do_search_fem_neighbours"].GetBool()) mDoSearchNeighbourFEMElements = true; else mDoSearchNeighbourFEMElements = false; mMaxTimeStep = max_delta_time; mNStepSearch = n_step_search; mSafetyFactor = safety_factor; mpDem_model_part = &(*(settings.r_model_part)); KRATOS_ERROR_IF(mpDem_model_part == NULL) << "Undefined settings.r_model_part in ExplicitSolverStrategy constructor" << std::endl; mpContact_model_part = &(*(settings.contact_model_part)); KRATOS_ERROR_IF(mpContact_model_part == NULL) << "Undefined settings.contact_model_part in ExplicitSolverStrategy constructor" << std::endl; mpFem_model_part = &(*(settings.fem_model_part)); KRATOS_ERROR_IF(mpFem_model_part == NULL) << "Undefined settings.fem_model_part in ExplicitSolverStrategy constructor" << std::endl; mpCluster_model_part = &(*(settings.cluster_model_part)); KRATOS_ERROR_IF(mpCluster_model_part == NULL) << "Undefined settings.cluster_model_part in ExplicitSolverStrategy constructor" << std::endl; mpInlet_model_part = &(*(settings.inlet_model_part)); KRATOS_ERROR_IF(mpInlet_model_part == NULL) << "Undefined settings.inlet_model_part in ExplicitSolverStrategy constructor" << std::endl; if(mParameters["RemoveBallsInitiallyTouchingWalls"].GetBool()) mRemoveBallsInitiallyTouchingWallsOption = true; else mRemoveBallsInitiallyTouchingWallsOption = false; } /// Destructor. 
virtual ~ExplicitSolverStrategy() { //Timer::SetOuputFile("TimesPartialRelease"); //Timer::PrintTimingInformation(); } struct LessX { bool operator()(const SphericParticle* p, const SphericParticle* q) const {return p->GetGeometry()[0].Coordinates()[0] < q->GetGeometry()[0].Coordinates()[0];} }; struct LessY { bool operator()(const SphericParticle* p, const SphericParticle* q) const {return p->GetGeometry()[0].Coordinates()[1] < q->GetGeometry()[0].Coordinates()[1];} }; struct LessZ { bool operator()(const SphericParticle* p, const SphericParticle* q) const {return p->GetGeometry()[0].Coordinates()[2] < q->GetGeometry()[0].Coordinates()[2];} }; struct SpatialSortingTraits { typedef SphericParticle* Point_2; typedef LessX Less_x_2; typedef LessY Less_y_2; typedef LessZ Less_z_2; Less_x_2 less_x_2_object() const {return Less_x_2();} Less_y_2 less_y_2_object() const {return Less_y_2();} Less_z_2 less_z_2_object() const { return Less_z_2();} }; #ifdef USING_CGAL void ReorderParticles() { SpatialSortingTraits sst; CGAL::spatial_sort(mListOfSphericParticles.begin(), mListOfSphericParticles.end(), sst); } #endif template <class T> void RebuildListOfSphericParticles(ElementsArrayType& pElements, std::vector<T*>& rCustomListOfParticles){ KRATOS_TRY rCustomListOfParticles.resize(pElements.size()); #pragma omp parallel for for (int k = 0; k < (int)pElements.size(); k++){ ElementsArrayType::iterator particle_pointer_it = pElements.ptr_begin() + k; T* spheric_particle = dynamic_cast<T*>(&(*particle_pointer_it)); rCustomListOfParticles[k] = spheric_particle; } return; KRATOS_CATCH("") } void RebuildListOfDiscontinuumSphericParticles() { RebuildListOfSphericParticles<SphericParticle>(GetModelPart().GetCommunicator().LocalMesh().Elements(), mListOfSphericParticles); } void RebuildPropertiesProxyPointers(std::vector<SphericParticle*>& rCustomListOfSphericParticles); void SendProcessInfoToClustersModelPart(); void UpdateMaxIdOfCreatorDestructor(); void 
RepairPointersToNormalProperties(std::vector<SphericParticle*>& rCustomListOfSphericParticles); virtual void Initialize(); virtual void AttachSpheresToStickyWalls(); virtual void DisplayThreadInfo(); double CalculateMaxInletTimeStep(); virtual void InitializeClusters(); virtual void GetClustersForce(); virtual void GetRigidBodyElementsForce(); virtual double SolveSolutionStep(); void SearchDEMOperations(ModelPart& r_model_part, bool has_mpi = true); void SearchFEMOperations(ModelPart& r_model_part, bool has_mpi = true) ; virtual void ForceOperations(ModelPart& r_model_part); void GetForce(); void FastGetForce(); virtual void PerformTimeIntegrationOfMotion(int StepFlag = 0); void InitializeSolutionStep(); virtual void BoundingBoxUtility(bool is_time_to_mark_and_remove = true); virtual void FinalizeSolutionStep(); void InitializeElements(); void InitializeDEMElements(); void InitializeFEMElements(); //void InitializeRigidBodyElements(); void InitializeFEMWallsAsRigidBodyElements(ModelPart::SubModelPartsContainerType::iterator& sub_model_part); void MarkToDeleteAllSpheresInitiallyIndentedWithFEM(ModelPart& rSpheresModelPart); void ComputeNodalArea(); void ComputeNormalPressureVectorField(); virtual void CalculateConditionsRHSAndAdd(); void ClearFEMForces(); void CalculateNodalPressuresAndStressesOnWalls(); void SetFlagAndVariableToNodes(const Kratos::Flags& r_flag_name, ComponentOf3ComponentsVariableType& r_variable_to_set, const double value, NodesArrayType& r_nodes_array); void SetVariableToNodes(ComponentOf3ComponentsVariableType& r_variable_to_set, const double value, NodesArrayType& r_nodes_array); void ResetPrescribedMotionFlagsRespectingImposedDofs(); void ApplyPrescribedBoundaryConditions(); void ApplyInitialConditions(); void SetSearchRadiiOnAllParticles(ModelPart& r_model_part, const double added_search_distance = 0.0, const double amplification = 1.0); void SetNormalRadiiOnAllParticles(ModelPart& r_model_part); void 
SetSearchRadiiWithFemOnAllParticles(ModelPart& r_model_part, const double added_search_distance = 0.0, const double amplification = 1.0); virtual void SearchNeighbours(); virtual void ComputeNewNeighboursHistoricalData(); virtual void CreateContactElements(); void InitializeContactElements(); // void ContactInitializeSolutionStep(); void PrepareContactElementsForPrinting(); virtual void ComputeNewRigidFaceNeighboursHistoricalData(); virtual void SearchRigidFaceNeighbours(); void CheckHierarchyWithCurrentNeighbours(); /* This should work only with one iteration, but it with mpi does not */ void CalculateInitialMaxIndentations(const ProcessInfo& r_process_info); void PrepareContactModelPart(ModelPart& r_model_part, ModelPart& mcontacts_model_part); void PrepareElementsForPrinting(); void SynchronizeHistoricalVariables(ModelPart& r_model_part); void SynchronizeRHS(ModelPart& r_model_part); void CleanEnergies(); void Check_MPI(bool& has_mpi); ModelPart& GetModelPart() { return (*mpDem_model_part);} ModelPart& GetFemModelPart() { return (*mpFem_model_part);} ModelPart& GetContactModelPart() { return (*mpContact_model_part);} ModelPart& GetClusterModelPart() { return (*mpCluster_model_part);} ModelPart& GetInletModelPart() { return (*mpInlet_model_part);} ModelPart& GetRigidBodyModelPart() { return (*mpRigidBody_model_part);} VectorResultElementsContainerType& GetResults() { return (mResults);} VectorDistanceType& GetResultsDistances() { return (mResultsDistances);} RadiusArrayType& GetArrayOfAmplifiedRadii() { return (mArrayOfAmplifiedRadii);} int& GetNStepSearch() { return (mNStepSearch);} int& GetSearchControl() { return mSearchControl;} int& GetNumberOfThreads() { return (mNumberOfThreads);} double& GetMaxTimeStep() { return (mMaxTimeStep);} double& GetSafetyFactor() { return (mSafetyFactor);} int& GetDeltaOption() { return (mDeltaOption);} ParticleCreatorDestructor::Pointer& GetParticleCreatorDestructor() { return (mpParticleCreatorDestructor);} 
SpatialSearch::Pointer& GetSpSearch() { return (mpSpSearch);} VectorResultConditionsContainerType& GetRigidFaceResults() { return (mRigidFaceResults);} VectorDistanceType& GetRigidFaceResultsDistances() { return (mRigidFaceResultsDistances);} DEM_FEM_Search::Pointer& GetDemFemSearch() { return (mpDemFemSearch);} virtual ElementsArrayType& GetElements(ModelPart& r_model_part) { return r_model_part.GetCommunicator().LocalMesh().Elements();} virtual ElementsArrayType& GetAllElements(ModelPart& r_model_part) { return r_model_part.Elements(); } protected: Parameters mParameters; bool mRemoveBallsInitiallyTouchingWallsOption; VectorResultElementsContainerType mResults; VectorDistanceType mResultsDistances; RadiusArrayType mArrayOfAmplifiedRadii; int mNStepSearch; int mSearchControl; int mNumberOfThreads; double mMaxTimeStep; double mSafetyFactor; int mDeltaOption; ParticleCreatorDestructor::Pointer mpParticleCreatorDestructor; DEM_FEM_Search::Pointer mpDemFemSearch; SpatialSearch::Pointer mpSpSearch; bool mDoSearchNeighbourElements; bool mDoSearchNeighbourFEMElements; VectorResultConditionsContainerType mRigidFaceResults; VectorDistanceType mRigidFaceResultsDistances; ModelPart *mpFem_model_part; ModelPart *mpDem_model_part; ModelPart *mpInlet_model_part; ModelPart *mpContact_model_part; ModelPart *mpCluster_model_part; ModelPart *mpRigidBody_model_part; std::vector<SphericParticle*> mListOfSphericParticles; std::vector<SphericParticle*> mListOfGhostSphericParticles; }; // Class ExplicitSolverStrategy } // namespace Kratos. #endif // KRATOS_EXPLICIT_SOLVER_STRATEGY defined
t.h
template<typename T> class foo { public: foo() { #pragma omp target { T a; } } };
scimath.c
/****************************************************************************** * Copyright 2019 Kyle Kloberdanz *****************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> #include "scimath.h" #define MAX(A, B) ((A) > (B)) ? (A) : (B) #define PAGE_SIZE 4096 ksm_GENERIC_VECTOR_IMPL(void_ptr) __attribute__((__noreturn__)) void greetings() { printf("hello world!\n"); exit(0); } double ksm_first_deriv(double (*f)(double), double x) { return (f(x + ksm_DERIV_H_CONST) - f(x - ksm_DERIV_H_CONST)) / (2 * ksm_DERIV_H_CONST); } double ksm_second_deriv(double (*f)(double), double x) { return (f(x + ksm_DERIV_H_CONST) - 2 * f(x) + f(x - ksm_DERIV_H_CONST)) / (ksm_DERIV_H_CONST * ksm_DERIV_H_CONST); } void ksm_map( double (*f)(double), double *dst, const double *src, size_t size ) { size_t i; #pragma omp parallel for for (i = 0; i < size; i++) { dst[i] = f(src[i]); } } /* * TODO: optimize with SIMD and intrinsic sqrt */ void ksm_vector_f64_sqrt(double *dst, const double *src, size_t size) { size_t i; #pragma omp parallel for for (i = 0; i < size; i++) { dst[i] = sqrt(src[i]); } } static struct MemoryPoolNode *kk_do_malloc(size_t size) { size_t capacity = MAX(size, PAGE_SIZE); void *memory = malloc(capacity); struct MemoryPoolNode *pool = malloc(sizeof(struct MemoryPoolNode)); pool->memory = memory; pool->next = NULL; pool->index = size; pool->capacity = capacity; return pool; } void kk_arena_init(struct Arena *arena) { arena->_pool = NULL; arena->_full_pool = NULL; } void *kk_arena_alloc(size_t size, struct Arena *arena) { start_alloc: if (arena->_pool == NULL) { /* first allocation */ struct MemoryPoolNode *pool = kk_do_malloc(size); arena->_pool = pool; return pool->memory; } else { struct MemoryPoolNode *pool; struct MemoryPoolNode *prev = NULL; struct MemoryPoolNode *full_pool = NULL; for (pool = arena->_pool; pool != NULL; pool = pool->next) { size_t bytes_left = pool->capacity - 
pool->index; if (bytes_left < 10) { /* remove full pool from active pools list */ if (prev == NULL) { arena->_pool = pool->next; } else { prev->next = pool->next; } /* move full pool to the _full_pool list */ full_pool = arena->_full_pool; if (full_pool == NULL) { arena->_full_pool = pool; } else { arena->_full_pool = pool; pool->next = full_pool; } goto start_alloc; } else if (size <= bytes_left) { /* has available memory in existing pool */ size_t index = pool->index; pool->index += size; return pool->memory + index; } if (pool) { prev = pool; } } /* needs to add new pool */ pool = kk_do_malloc(size); prev->next = pool; return pool->memory; } } static void free_pools(struct MemoryPoolNode *pool) { struct MemoryPoolNode *head; while (pool) { head = pool->next; free(pool->memory); free(pool); pool = head; } } void kk_arena_free_all(struct Arena *arena) { free_pools(arena->_pool); free_pools(arena->_full_pool); } void *kk_track_malloc(size_t size, struct ksm_void_ptr_Vector *vec) { void *ptr = malloc(size); if (ptr != NULL) { ksm_void_ptr_vector_push(vec, ptr); } return ptr; } void kk_track_free(struct ksm_void_ptr_Vector *vec) { size_t i; for (i = 0; i < vec->size; i++) { free(vec->data[i]); } ksm_void_ptr_vector_free(vec); } void kk_btree_init(struct BTree *btree, int (*compare_keys)(void *, void *)) { btree->compare_keys = compare_keys; btree->block = malloc(sizeof(struct BTreeBlock)); btree->size = kk_BTREE_SIZE; } static void kk_btree_do_insertion( struct BTreeBlock *block, char *key, double value, size_t btree_size ) { /* if there is room, just insert it */ if (block->index < btree_size) { struct BTreeNode *new_node; block->index++; new_node = &block->data[block->index]; new_node->key = key; new_node->value = value; /* now sort */ } } void kk_btree_insert(struct BTree *btree, char *key, double value) { kk_btree_do_insertion(btree->block, key, value, btree->size); }
GB_unop__asinh_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): generated kernel file for cij = asinh(aij), fp64 -> fp64.
// Only comments/formatting may be touched here.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__asinh_fp64_fp64)
// op(A') function:  GB (_unop_tran__asinh_fp64_fp64)

// C type:   double
// A type:   double
// cast:     double cij = aij
// unaryop:  cij = asinh (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = asinh (x) ;

// casting
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    double aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ;                \
    Cx [pC] = asinh (z) ;           \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ASINH || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__asinh_fp64_fp64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = asinh (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = asinh (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__asinh_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
mandel-omp-row-taskloop.c
/* * Sequential Mandelbrot program * * This program computes and displays all or part of the Mandelbrot * set. By default, it examines all points in the complex plane * that have both real and imaginary parts between -2 and 2. * Command-line parameters allow zooming in on a specific part of * this range. * * Usage: * mandel [-i maxiter -c x0 y0 -s size -w windowsize] * where * maxiter denotes the maximum number of iterations at each point -- by default 1000 * x0, y0, and size specify the range to examine (a square * centered at (x0 + iy0) of size 2*size by 2*size -- by default, * a square of size 4 by 4 centered at the origin) * windowsize denotes the size of the image (diplay window) to compute * * Input: none, except the optional command-line arguments * Output: a graphical display as described in Wilkinson & Allen, * displayed using the X Window system, plus text output to * standard output showing the above parameters, plus execution * time in seconds. * * Code based on the original code from Web site for Wilkinson and Allen's * text on parallel programming: * http://www.cs.uncc.edu/~abw/parallel/par_prog/ * */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <unistd.h> #include <malloc.h> #if _DISPLAY_ #include <X11/Xlib.h> #include <X11/Xutil.h> #include <X11/Xos.h> #endif #include <sys/time.h> double getusec_() { struct timeval time; gettimeofday(&time, NULL); return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec); } #define START_COUNT_TIME stamp = getusec_(); #define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\ stamp = stamp/1e6;\ printf ("%s: %0.6fs\n",(_m), stamp); /* Default values for things. 
*/ #define N 2 /* size of problem space (x, y from -N to N) */ #define NPIXELS 800 /* size of display window in pixels */ int row, col; // variables used to traverse the problem space /* Structure definition for complex numbers */ typedef struct { double real, imag; } complex; #if _DISPLAY_ /* Functions for GUI */ #include "mandelbrot-gui.h" /* has setup(), interact() */ #endif void mandelbrot(int height, int width, double real_min, double imag_min, double scale_real, double scale_imag, int maxiter, #if _DISPLAY_ int setup_return, Display *display, Window win, GC gc, double scale_color, double min_color) #else int ** output) #endif { /* Calculate points and save/display */ #pragma omp parallel{ int num_threads = omp_get_num_threads(); #pragma omp single #pragma omp taskloop grainsize(height/num_threads) for (int row = 0; row < height; ++row) { for (int col = 0; col < width; ++col) { complex z, c; z.real = z.imag = 0; /* Scale display coordinates to actual region */ c.real = real_min + ((double) col * scale_real); c.imag = imag_min + ((double) (height-1-row) * scale_imag); /* height-1-row so y axis displays * with larger values at top */ /* Calculate z0, z1, .... 
until divergence or maximum iterations */ int k = 0; double lengthsq, temp; do { temp = z.real*z.real - z.imag*z.imag + c.real; z.imag = 2*z.real*z.imag + c.imag; z.real = temp; lengthsq = z.real*z.real + z.imag*z.imag; ++k; } while (lengthsq < (N*N) && k < maxiter); #if _DISPLAY_ /* Scale color and display point */ long color = (long) ((k-1) * scale_color) + min_color; if (setup_return == EXIT_SUCCESS) { /*#pragma omp critical {*/ XSetForeground (display, gc, color); XDrawPoint (display, win, gc, col, row); //} } #else output[row][col]=k; #endif } } } } int main(int argc, char *argv[]) { int maxiter = 1000; double real_min; double real_max; double imag_min; double imag_max; int width = NPIXELS; /* dimensions of display window */ int height = NPIXELS; double size=N, x0 = 0, y0 = 0; #if _DISPLAY_ Display *display; Window win; GC gc; int setup_return; long min_color = 0, max_color = 0; double scale_color; #else int ** output; FILE *fp = NULL; #endif double scale_real, scale_imag; /* Process command-line arguments */ for (int i=1; i<argc; i++) { if (strcmp(argv[i], "-i")==0) { maxiter = atoi(argv[++i]); } else if (strcmp(argv[i], "-w")==0) { width = atoi(argv[++i]); height = width; } else if (strcmp(argv[i], "-s")==0) { size = atof(argv[++i]); } #if !_DISPLAY_ else if (strcmp(argv[i], "-o")==0) { if((fp=fopen("parallel.out", "wb"))==NULL) { fprintf(stderr, "Unable to open file\n"); return EXIT_FAILURE; } } #endif else if (strcmp(argv[i], "-c")==0) { x0 = atof(argv[++i]); y0 = atof(argv[++i]); } else { #if _DISPLAY_ fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]); #else fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]); fprintf(stderr, " -o to write computed image to disk (default no file generated)\n"); #endif fprintf(stderr, " -i to specify maximum number of iterations at each point (default 1000)\n"); #if _DISPLAY_ fprintf(stderr, " -w to specify the size of the display window (default 800x800 
pixels)\n"); #else fprintf(stderr, " -w to specify the size of the image to compute (default 800x800 elements)\n"); #endif fprintf(stderr, " -c to specify the center x0+iy0 of the square to compute (default origin)\n"); fprintf(stderr, " -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n"); return EXIT_FAILURE; } } real_min = x0 - size; real_max = x0 + size; imag_min = y0 - size; imag_max = y0 + size; /* Produce text output */ fprintf(stdout, "\n"); fprintf(stdout, "Mandelbrot program\n"); fprintf(stdout, "center = (%g, %g), size = %g\n", (real_max + real_min)/2, (imag_max + imag_min)/2, (real_max - real_min)/2); fprintf(stdout, "maximum iterations = %d\n", maxiter); fprintf(stdout, "\n"); #if _DISPLAY_ /* Initialize for graphical display */ setup_return = setup(width, height, &display, &win, &gc, &min_color, &max_color); if (setup_return != EXIT_SUCCESS) { fprintf(stderr, "Unable to initialize display, continuing\n"); return EXIT_FAILURE; } #else output = malloc(height*sizeof(int *)); for (int row = 0; row < height; ++row) output[row] = malloc(width*sizeof(int)); #endif /* Compute factors to scale computational region to window */ scale_real = (double) (real_max - real_min) / (double) width; scale_imag = (double) (imag_max - imag_min) / (double) height; #if _DISPLAY_ /* Compute factor for color scaling */ scale_color = (double) (max_color - min_color) / (double) (maxiter - 1); #endif /* Start timing */ double stamp; START_COUNT_TIME; #if _DISPLAY_ mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter, setup_return, display, win, gc, scale_color, min_color); #else mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter, output); #endif /* End timing */ STOP_COUNT_TIME("Total execution time"); /* Be sure all output is written */ #if _DISPLAY_ if (setup_return == EXIT_SUCCESS) { XFlush (display); } #else if (fp != NULL) { for (int row = 0; row < height; ++row) if(fwrite(output[row], sizeof(int), 
width, fp) != width) { fprintf(stderr, "Output file not written correctly\n"); } } #endif #if _DISPLAY_ /* Wait for user response, then exit program */ if (setup_return == EXIT_SUCCESS) { interact(display, &win, width, height, real_min, real_max, imag_min, imag_max); } return EXIT_SUCCESS; #endif }
/* ===== schedule-modifiers-1.c ===== */
/* GCC DejaGnu testcase for OpenMP `schedule` clause modifiers.
 * foo() exercises every accepted modifier/kind combination; bar() checks
 * that invalid combinations produce the expected diagnostics.
 * The dg-* comments are DejaGnu directives: they are matched against the
 * compiler output and must stay on the same line as the construct they
 * annotate -- do not edit or move them. */
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */

/* Valid uses: simd, monotonic and nonmonotonic modifiers, alone or in
 * pairs, on the schedule kinds that accept them.  Each loop compiles
 * without diagnostics. */
void foo (void)
{
  int i;
#pragma omp for simd schedule (simd, simd: static, 5)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for simd schedule (monotonic, simd: static)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for simd schedule (simd , monotonic : static, 6)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (monotonic, monotonic : static, 7)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (nonmonotonic, nonmonotonic : dynamic)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for simd schedule (nonmonotonic , simd : dynamic, 3)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for simd schedule (nonmonotonic,simd:guided,4)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (monotonic: static, 2)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (monotonic : static)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (monotonic : dynamic)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (monotonic : dynamic, 3)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (monotonic : guided)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (monotonic : guided, 7)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (monotonic : runtime)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (monotonic : auto)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (nonmonotonic : dynamic)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (nonmonotonic : dynamic, 3)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (nonmonotonic : guided)
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (nonmonotonic : guided, 7)
  for (i = 0; i < 64; i++)
    ;
}

/* Invalid uses: nonmonotonic is only allowed with the dynamic and guided
 * schedule kinds, may not appear together with the ordered clause, and may
 * not be combined with monotonic.  Every construct below must be rejected
 * with the diagnostic named in its dg-error directive. */
void bar (void)
{
  int i;
  /* nonmonotonic on schedule kinds that do not accept it.  */
#pragma omp for schedule (nonmonotonic: static, 2)	/* { dg-error ".nonmonotonic. modifier specified for .static. schedule kind" } */
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (nonmonotonic : static)	/* { dg-error ".nonmonotonic. modifier specified for .static. schedule kind" } */
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (nonmonotonic : runtime)	/* { dg-error ".nonmonotonic. modifier specified for .runtime. schedule kind" } */
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (nonmonotonic : auto)	/* { dg-error ".nonmonotonic. modifier specified for .auto. schedule kind" } */
  for (i = 0; i < 64; i++)
    ;
  /* nonmonotonic combined with the ordered clause, in either clause
   * order, with stand-alone and doacross ordered constructs.  */
#pragma omp for schedule (nonmonotonic, dynamic) ordered	/* { dg-error ".nonmonotonic. schedule modifier specified together with .ordered. clause" } */
  for (i = 0; i < 64; i++)
#pragma omp ordered
    ;
#pragma omp for ordered schedule(nonmonotonic, dynamic, 5)	/* { dg-error ".nonmonotonic. schedule modifier specified together with .ordered. clause" } */
  for (i = 0; i < 64; i++)
#pragma omp ordered
    ;
#pragma omp for schedule (nonmonotonic, guided) ordered(1)	/* { dg-error ".nonmonotonic. schedule modifier specified together with .ordered. clause" } */
  for (i = 0; i < 64; i++)
    {
#pragma omp ordered depend(sink: i - 1)
#pragma omp ordered depend(source)
    }
#pragma omp for ordered(1) schedule(nonmonotonic, guided, 2)	/* { dg-error ".nonmonotonic. schedule modifier specified together with .ordered. clause" } */
  for (i = 0; i < 64; i++)
    {
#pragma omp ordered depend(source)
#pragma omp ordered depend(sink: i - 1)
    }
  /* monotonic and nonmonotonic are mutually exclusive.  */
#pragma omp for schedule (nonmonotonic , monotonic : dynamic)	/* { dg-error "both .monotonic. and .nonmonotonic. modifiers specified" } */
  for (i = 0; i < 64; i++)
    ;
#pragma omp for schedule (monotonic,nonmonotonic:dynamic)	/* { dg-error "both .monotonic. and .nonmonotonic. modifiers specified" } */
  for (i = 0; i < 64; i++)
    ;
}