source
stringlengths
3
92
c
stringlengths
26
2.25M
cpl_error.c
/* * This file is part of the ESO Common Pipeline Library * Copyright (C) 2001-2017 European Southern Observatory * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include <string.h> #ifdef HAVE_LIBPTHREAD #include <pthread.h> #endif #include <cxmessages.h> #include <cxutils.h> #include "cpl_error_impl.h" /* Needed for cpl_sprintf() */ #include "cpl_memory.h" /* Needed for CPL_STRINGIFY() */ #include "cpl_tools.h" /* Needed for FLEN_STATUS */ #include <fitsio.h> /** * @defgroup cpl_error Error handling * * This module provides functions to maintain the @em cpl_error_code * set by any CPL function, similarly to what is done with the @em errno * variable of the standard C library. The following guidelines are * respected: * * - If no error occurs in a CPL function the @em cpl_error_code will * remain unchanged. * - If an error occurs in a CPL function, a new CPL error is set, causing * the @em cpl_error_code to be modified to the new error. * * A @em cpl_error_code equal to the enumeration constant * @c CPL_ERROR_NONE would indicate no error condition. Note, * however, that the @em cpl_error_code is only set when an error * occurs, and it is not reset by successful function calls. 
* For this reason it may be appropriate in some cases to reset * the @em cpl_error_code using the function @c cpl_error_reset(). * The @em cpl_error_code set by a CPL function can be obtained by * calling the function @c cpl_error_get_code(), but functions of * type @em cpl_error_code would not only return this code directly, * but would also return @c CPL_ERROR_NONE in case of success. Other * CPL functions return zero on success, or a non-zero value to indicate * a change of the @em cpl_error_code, while CPL functions returning * a pointer would flag an error by returning a @c NULL. * * To each @em cpl_error_code is associated a standard error message, * that can be obtained by calling the function @c cpl_error_get_message(). * Conventionally, no CPL function will ever display any error message, * leaving to the caller the decision of how to handle a given error * condition. A call to the function @c cpl_error_get_function() would * return the name of the function where the error occurred, and the * functions @c cpl_error_get_file() and @c cpl_error_get_line() would * also return the name of the source file containing the function * code, and the line number where the error occurred. The function * @c cpl_error_get_where() would gather all this items together, in * a colon-separated string. 
* * @par Synopsis: * @code * #include <cpl_error.h> * @endcode */ /**@{*/ #ifndef CPL_LINESZ #ifdef FITS_LINESZ #define CPL_LINESZ FITS_LINESZ #else #define CPL_LINESZ 6 #endif #endif #define MAX_WHERE_LENGTH (2+(MAX_NAME_LENGTH)+(MAX_FILE_LENGTH)+(CPL_LINESZ)) /*----------------------------------------------------------------------------- Private variables -----------------------------------------------------------------------------*/ #ifdef HAVE_LIBPTHREAD static pthread_rwlock_t cpl_lock_error_status; static pthread_rwlock_t cpl_lock_error_read_only; #endif static cpl_boolean cpl_error_status = CPL_FALSE; static cpl_boolean cpl_error_read_only = CPL_FALSE; #ifdef _OPENMP #pragma omp threadprivate(cpl_error_status, cpl_error_read_only) #endif /*----------------------------------------------------------------------------- Private functions -----------------------------------------------------------------------------*/ static cpl_error * cpl_error_fill(const char *, cpl_error_code, const char *, unsigned); static cpl_error_code cpl_error_set_message_macro_(const char *, cpl_error_code, const char *, unsigned, const char *, va_list) CPL_ATTR_PRINTF(5,0) #ifdef CPL_HAVE_ATTR_NONNULL __attribute__((nonnull(3))) #endif ; /*----------------------------------------------------------------------------- Definitions of functions -----------------------------------------------------------------------------*/ /** * @brief * Reset the @em cpl_error_code. * * @return Nothing. * * This function initialises the @em cpl_error_code to @c CPL_ERROR_NONE. 
*/
void cpl_error_reset(void)
{

#ifdef HAVE_LIBPTHREAD
    /* Both locks are taken: the status flag is written, the
       read-only flag is only read */
    pthread_rwlock_wrlock(&cpl_lock_error_status);
    pthread_rwlock_rdlock(&cpl_lock_error_read_only);
#endif

    /* While the error system is read-only (see cpl_error_set_readonly())
       the reset request is silently ignored */
    if (!cpl_error_read_only) cpl_error_status = CPL_FALSE;

#ifdef HAVE_LIBPTHREAD
    pthread_rwlock_unlock(&cpl_lock_error_status);
    pthread_rwlock_unlock(&cpl_lock_error_read_only);
#endif

}

/*
 * @internal
 * @brief
 *   Set CPL error code, function name, source file & line number where it
 *   occurred along with a text message
 *
 * @param function Character string with function name, cpl_func
 * @param code     Error code
 * @param file     Character string with source file name (__FILE__)
 * @param line     Integer with line number (__LINE__)
 * @param text     Text to append to error message, may be printf-like
 * @param ...      Optional, variable argument list for the printf-like text
 * @return The CPL error code, or CPL_ERROR_UNSPECIFIED if the code is
 *         CPL_ERROR_HISTORY_LOST.
 * @note This function is only provided for cpl_error_set*() macros.
 * @see cpl_error_set_message()
 */
cpl_error_code cpl_error_set_message_macro(const char * function,
                                           cpl_error_code code,
                                           const char * file, unsigned line,
                                           const char * text, ...)
{

    va_list arglist;
    cpl_error_code lcode;

    /* Forward to the va_list variant, which does the actual work */
    va_start(arglist, text);
    lcode = cpl_error_set_message_macro_(function, code, file, line,
                                         text, arglist);
    va_end(arglist);

    return lcode;
}

/*----------------------------------------------------------------------------*/
/**
 * @internal
 * @brief
 *   Set CPL error code, function name, source file, line number where
 *   a FITS error occurred along with a FITS specific text message
 * @param function     Character string with function name, cpl_func
 * @param code         CPL Error code
 * @param fitscode     The error code of the failed (CFITSIO) I/O call
 * @param fitsfunction Character string with (CFITSIO) function name
 * @param file         Character string with source file name (__FILE__)
 * @param line         Integer with line number (__LINE__)
 * @param text         Text to append to error message, may be printf-like
 * @param ...          Optional, variable argument list for the printf-like text
 * @return The CPL error code
 * @see cpl_error_set_message()
 * @note This function should only be called from the cpl_error_set_fits() macro
 */
cpl_error_code cpl_error_set_fits_macro(const char * function,
                                        cpl_error_code code,
                                        int fitscode,
                                        const char * fitsfunction,
                                        const char * file,
                                        unsigned line,
                                        const char * text, ...)
{

    char cfitsio_msg[FLEN_ERRMSG];
    va_list arglist;
    cpl_error_code lcode;
    char * newformat;
    /* The cpl_error_set_fits() macro stringifies its function argument,
       so an absent function name arrives as the literal "" (two quotes) */
    const cpl_boolean has_name = fitsfunction && strcmp(fitsfunction, "\"\"");

    /* Translate the CFITSIO status code to its standard text */
    fits_get_errstatus(fitscode, cfitsio_msg);
    cfitsio_msg[FLEN_ERRMSG-1] = '\0' ; /* Better safe than sorry */

    /* Build an extended format string that embeds the CFITSIO context;
       the caller-supplied printf-arguments still apply to the %s-appended
       text */
    newformat = cpl_sprintf("\"%s\" from CFITSIO "
#ifdef CFITSIO_VERSION /* Used from v. 3.0 */
                            "(ver. " CPL_STRINGIFY(CFITSIO_VERSION) ") "
#endif
                            "%s%s=%d. %s", cfitsio_msg,
                            has_name ? fitsfunction : "",
                            has_name ?
"()" : "error", fitscode, text); va_start(arglist, text); lcode = cpl_error_set_message_macro_(function, code, file, line, newformat, arglist); va_end(arglist); cpl_free(newformat); return lcode; } /*----------------------------------------------------------------------------*/ /** * @internal * @brief * Set CPL error code, function name, source file, line number where * a regex error occurred along with a regex specific text message * @param function Character string with function name, cpl_func * @param code CPL Error code * @param regcode The error code of the failed regcomp() call * @param preg The regex of the failed call * @param file Character string with source file name (__FILE__) * @param line Integer with line number (__LINE__) * @param text Text to append to error message, may be printf-like * @param ... Optional, variable argument list for the printf-like text * @return The CPL error code * @see cpl_error_set_message() * @note This function should only be called from cpl_error_set_regex() */ cpl_error_code cpl_error_set_regex_macro(const char * function, cpl_error_code code, int regcode, const regex_t * preg, const char * file, unsigned line, const char * text, ...) { cpl_error_code lcode; va_list arglist; char * newformat; if (preg == NULL) { /* Passing NULL to regerror() seems to fill with the information of the previous error, which we do not want here */ newformat = cpl_sprintf("regcomp(NULL)=%d. %s", regcode, text); } else { char regex_msg[CPL_ERROR_MAX_MESSAGE_LENGTH]; (void)regerror(regcode, preg, regex_msg, CPL_ERROR_MAX_MESSAGE_LENGTH); newformat = cpl_sprintf("\"%s\" from regcomp()=%d. 
%s", regex_msg, regcode, text); } va_start(arglist, text); lcode = cpl_error_set_message_macro_(function, code, file, line, newformat, arglist); va_end(arglist); cpl_free(newformat); return lcode; } /*----------------------------------------------------------------------------*/ /** * @internal * @brief * Set CPL error code, function name, source file, line number where * an WCSLIB error occurred along with a WCSLIB specific text message * @param function Character string with function name, cpl_func * @param code CPL Error code * @param wcscode The error code of the failed WCSLIB call * @param wcsfunction Character string with WCSLIB function name * @param wcserrmsg The WCSLIB array of error messages * @param file Character string with source file name (__FILE__) * @param line Integer with line number (__LINE__) * @param text Text to append to error message, may be printf-like * @param ... Optional, variable argument list for the printf-like text * @return The CPL error code * @see cpl_error_set_message() * @note This function should only be called from the cpl_error_set_wcs() macro * */ cpl_error_code cpl_error_set_wcs_macro(const char * function, cpl_error_code code, int wcserror, const char * wcsfunction, const char * wcserrmsg[], const char * file, unsigned line, const char * text, ...) { cpl_error_code lcode; va_list arglist; char * newformat; const cpl_boolean has_name = wcsfunction && strlen(wcsfunction); if (wcserror < 0) { newformat = cpl_sprintf("%s%s=%d < 0. %s", has_name ? wcsfunction : "", has_name ? "()" : "error", wcserror, text); } else if (wcserrmsg == NULL) { newformat = cpl_sprintf("%s%s()=%d. wcs_errmsg[] == NULL. %s", has_name ? wcsfunction : "", has_name ? "()" : "error", wcserror, text); } else if (wcserrmsg[wcserror] == NULL) { newformat = cpl_sprintf("%s%s()=%d. wcs_errmsg[%d] == NULL. %s", has_name ? wcsfunction : "", has_name ? "()" : "error", wcserror, wcserror, text); } else { newformat = cpl_sprintf("\"%s\" from %s%s()=%d. 
%s", wcserrmsg[wcserror], has_name ? wcsfunction : "", has_name ? "()" : "error", wcserror, text); } va_start(arglist, text); lcode = cpl_error_set_message_macro_(function, code, file, line, newformat, arglist); va_end(arglist); cpl_free(newformat); return lcode; } /** * @brief * Get the last @em cpl_error_code set. * * @return @em cpl_error_code of last occurred CPL error. * * Get @em cpl_error_code of last occurred error. */ cpl_error_code cpl_error_get_code(void) { cpl_error_code code = CPL_ERROR_NONE; #ifdef HAVE_LIBPTHREAD pthread_rwlock_rdlock(&cpl_lock_error_status); #endif if (cpl_error_status) { const cpl_error * error = cpl_errorstate_find(); code = error->code; } #ifdef HAVE_LIBPTHREAD pthread_rwlock_unlock(&cpl_lock_error_status); #endif return code; } /** * @brief * Get the text message of the current CPL error. * * @return The text message of the current CPL error. * @see cpl_error_get_message_default(), cpl_error_set_message() * * If the @em cpl_error_code is equal to @c CPL_ERROR_NONE, * an empty string is returned. Otherwise, the message is the default * message for the current CPL error code, possibly extended with a * custom message supplied when the error was set. * */ const char * cpl_error_get_message(void) { const cpl_error * error; #ifdef HAVE_LIBPTHREAD pthread_rwlock_rdlock(&cpl_lock_error_status); #endif if (!cpl_error_status) { #ifdef HAVE_LIBPTHREAD pthread_rwlock_unlock(&cpl_lock_error_status); #endif return cpl_error_get_message_default(CPL_ERROR_NONE); } #ifdef HAVE_LIBPTHREAD pthread_rwlock_unlock(&cpl_lock_error_status); #endif error = cpl_errorstate_find(); /* assert(error->code != CPL_ERROR_NONE); */ return strlen(error->msg) ? error->msg : cpl_error_get_message_default(error->code); } /** * @brief * Get the function name where the last CPL error occurred. * * @return Identifier string of the function name where the last CPL error * occurred. * * Get the function name where the last CPL error occurred. 
*/ const char *cpl_error_get_function(void) { const char * function = ""; #ifdef HAVE_LIBPTHREAD pthread_rwlock_rdlock(&cpl_lock_error_status); #endif if (cpl_error_status) { const cpl_error * error = cpl_errorstate_find(); function = error->function; } #ifdef HAVE_LIBPTHREAD pthread_rwlock_unlock(&cpl_lock_error_status); #endif return function; } /** * @brief * Get function name, source file and line number where the last * CPL error occurred. * * @return String containing function name, source file and line number * separated by colons (:). * * Get where the last CPL error occurred in the form * @c function_name:source_file:line_number */ const char *cpl_error_get_where(void) { static char cpl_error_where_string[MAX_WHERE_LENGTH]; #ifdef _OPENMP #pragma omp threadprivate(cpl_error_where_string) #endif (void)cx_snprintf((cxchar *)cpl_error_where_string, (cxsize)MAX_WHERE_LENGTH, (const cxchar *)"%s:%s:%u", cpl_error_get_function(), cpl_error_get_file(), cpl_error_get_line()); return cpl_error_where_string; } /** * @brief * Get the source code file name where the last CPL error occurred. * * @return Name of source file name where the last CPL error occurred. * * Get the source code file name where the last CPL error occurred. */ const char *cpl_error_get_file(void) { const char * file = ""; #ifdef HAVE_LIBPTHREAD pthread_rwlock_rdlock(&cpl_lock_error_status); #endif if (cpl_error_status) { const cpl_error * error = cpl_errorstate_find(); file = error->file; } #ifdef HAVE_LIBPTHREAD pthread_rwlock_unlock(&cpl_lock_error_status); #endif return file; } /** * @brief * Get the line number where the last CPL error occurred. * * @return Line number of the source file where the last CPL error occurred. * * Get the line number of the source file where the last CPL error occurred. 
*/ unsigned cpl_error_get_line(void) { unsigned line = 0; #ifdef HAVE_LIBPTHREAD pthread_rwlock_rdlock(&cpl_lock_error_status); #endif if (cpl_error_status) { const cpl_error * error = cpl_errorstate_find(); line = error->line; } #ifdef HAVE_LIBPTHREAD pthread_rwlock_unlock(&cpl_lock_error_status); #endif return line; } /*----------------------------------------------------------------------------*/ /** @brief Return the standard CPL error message of the current CPL error @param code The error code of the current CPL error @return The standard CPL error message of the current CPL error */ /*----------------------------------------------------------------------------*/ const char * cpl_error_get_message_default(cpl_error_code code) { const char * message; switch (code) { case CPL_ERROR_NONE: message = ""; break; case CPL_ERROR_UNSPECIFIED: message = "An unspecified error"; break; case CPL_ERROR_HISTORY_LOST: message = "The actual error was lost"; break; case CPL_ERROR_DUPLICATING_STREAM: message = "Cannot duplicate output stream"; break; case CPL_ERROR_ASSIGNING_STREAM: message = "Cannot associate a stream with a file descriptor"; break; case CPL_ERROR_FILE_IO: message = "File read/write error"; break; case CPL_ERROR_BAD_FILE_FORMAT: message = "Bad file format"; break; case CPL_ERROR_FILE_ALREADY_OPEN: message = "File already open"; break; case CPL_ERROR_FILE_NOT_CREATED: message = "File cannot be created"; break; case CPL_ERROR_FILE_NOT_FOUND: message = "File not found"; break; case CPL_ERROR_DATA_NOT_FOUND: message = "Data not found"; break; case CPL_ERROR_ACCESS_OUT_OF_RANGE: message = "Access beyond boundaries"; break; case CPL_ERROR_NULL_INPUT: message = "Null input data"; break; case CPL_ERROR_INCOMPATIBLE_INPUT: message = "Input data do not match"; break; case CPL_ERROR_ILLEGAL_INPUT: message = "Illegal input"; break; case CPL_ERROR_ILLEGAL_OUTPUT: message = "Illegal output"; break; case CPL_ERROR_UNSUPPORTED_MODE: message = "Unsupported mode"; break; case 
CPL_ERROR_SINGULAR_MATRIX:
            message = "Singular matrix";
            break;
        case CPL_ERROR_DIVISION_BY_ZERO:
            message = "Division by zero";
            break;
        case CPL_ERROR_TYPE_MISMATCH:
            message = "Type mismatch";
            break;
        case CPL_ERROR_INVALID_TYPE:
            message = "Invalid type";
            break;
        case CPL_ERROR_CONTINUE:
            message = "The iterative process did not converge";
            break;
        case CPL_ERROR_NO_WCS:
            message = "The WCS functionalities are missing";
            break;
        /* CPL_ERROR_EOL and any code beyond it share the same text */
        case CPL_ERROR_EOL:
            message = "A user-defined error";
            break;
        default:
            message = "A user-defined error";
            break;
    }

    return message;
}

#ifdef HAVE_LIBPTHREAD
/*----------------------------------------------------------------------------*/
/**
  @brief    Initialise the RW locks of the global variables in this module

 */
/*----------------------------------------------------------------------------*/
void cpl_error_init_locks(void)
{
    pthread_rwlock_init(&cpl_lock_error_status, NULL);
    pthread_rwlock_init(&cpl_lock_error_read_only, NULL);
}
#endif

/**@}*/

/*----------------------------------------------------------------------------*/
/**
  @internal
  @brief   Get the status of the CPL error state
  @return  True iff an error code has been set
  @note This function may only be used by the cpl_errorstate module.

  CPL_FALSE: The CPL error state is clear, no history may be read.
  CPL_TRUE:  The CPL error state has been set, and contains at least
             one CPL error state (with a non-zero error code).
*/
/*----------------------------------------------------------------------------*/
void cpl_error_reset_readonly(void)
{
#ifdef HAVE_LIBPTHREAD
    pthread_rwlock_wrlock(&cpl_lock_error_read_only);
#endif

    cpl_error_read_only = CPL_FALSE;

#ifdef HAVE_LIBPTHREAD
    pthread_rwlock_unlock(&cpl_lock_error_read_only);
#endif
}

/*
 * @internal
 * @brief Set the basics of a CPL error and return the struct
 * @param function Character string with function name, cpl_func
 * @param code     Error code
 * @param file     Character string with source file name (__FILE__)
 * @param line     Integer with line number (__LINE__)
 * @return A pointer to the struct of the error.
 * @see cpl_error_set_message_macro()
 */
static cpl_error * cpl_error_fill(const char * function, cpl_error_code code,
                                  const char * file, unsigned line)
{
    cpl_error * error = cpl_errorstate_append();

    cx_assert(error != NULL);
    cx_assert(code != CPL_ERROR_NONE);

#ifdef HAVE_LIBPTHREAD
    pthread_rwlock_rdlock(&cpl_lock_error_read_only);
#endif

    /* Filling in a new error while the state is read-only is a caller bug */
    cx_assert(!cpl_error_read_only);

#ifdef HAVE_LIBPTHREAD
    pthread_rwlock_unlock(&cpl_lock_error_read_only);
    pthread_rwlock_wrlock(&cpl_lock_error_status);
#endif

    cpl_error_status = CPL_TRUE;

#ifdef HAVE_LIBPTHREAD
    pthread_rwlock_unlock(&cpl_lock_error_status);
#endif

    error->code = code;
    error->line = line;

    /* NOTE(review): error->function/error->file appear to be fixed-size
       arrays of MAX_NAME_LENGTH+1 / MAX_FILE_LENGTH+1 chars - the explicit
       NUL after strncpy() relies on that; confirm against cpl_error_impl.h */
    if (function == NULL) {
        error->function[0] = '\0';
    } else {
        (void)strncpy(error->function, function, MAX_NAME_LENGTH);
        error->function[MAX_NAME_LENGTH] = '\0';
    }

    if (file == NULL) {
        error->file[0] = '\0';
    } else {
        (void)strncpy(error->file, file, MAX_FILE_LENGTH);
        error->file[MAX_FILE_LENGTH] = '\0';
    }

    error->msg[0] = '\0';

    return error;
}

/*
 * @internal
 * @brief
 *   Set CPL error code, function name, source file & line number where it
 *   occurred along with a text message and a variable argument list
 * @param function Character string with function name (cpl_func)
 * @param code     Error code
 * @param file     Character string with source file name (__FILE__)
 * @param line     Integer with line number (__LINE__)
 * @param text     Text to append to error message, may be printf-like
 * @param arglist  Optional, variable argument list for the printf-like text
 * @return The CPL error code, or CPL_ERROR_UNSPECIFIED if the code is
 *         CPL_ERROR_HISTORY_LOST.
 * @see cpl_error_set_message_macro()
 */
static cpl_error_code cpl_error_set_message_macro_(const char * function,
                                                   cpl_error_code code,
                                                   const char * file,
                                                   unsigned line,
                                                   const char * text,
                                                   va_list arglist)
{
    /* Check copied from cpl_error_set_message_one_macro() */
    /* A NULL, empty, or single-space text means "no user message":
       skip the printf-style formatting in that case */
    char * message = (text != NULL && text[0] != '\0' &&
                      (text[0] != ' ' || text[1] != '\0'))
        ? cpl_vsprintf(text, arglist) : NULL;

    const char * usemsg = message ? message : text;

    const cpl_error_code lcode =
        cpl_error_set_message_one_macro(function, code, file, line, usemsg);

    cpl_free((void*)message);

    return lcode;
}

/*
 * @internal
 * @brief
 *   Set CPL error code, function name, source file & line number where it
 *   occurred along with a pre-formatted text message
 * @param function Character string with function name (cpl_func)
 * @param code     Error code
 * @param file     Character string with source file name (__FILE__)
 * @param line     Integer with line number (__LINE__)
 * @param text     Text to append to error message
 * @return The CPL error code, or CPL_ERROR_UNSPECIFIED if the code is
 *         CPL_ERROR_HISTORY_LOST.
 * @see cpl_error_set_message_macro_()
 */
cpl_error_code cpl_error_set_message_one_macro(const char * function,
                                               cpl_error_code code,
                                               const char * file,
                                               unsigned line,
                                               const char * text)
{

    /* CPL_ERROR_HISTORY_LOST is reserved; map it to UNSPECIFIED */
    const cpl_error_code lcode = code == CPL_ERROR_HISTORY_LOST ?
        CPL_ERROR_UNSPECIFIED : code;

#ifdef HAVE_LIBPTHREAD
    pthread_rwlock_rdlock(&cpl_lock_error_read_only);
#endif

    if (!cpl_error_read_only && code != CPL_ERROR_NONE) {

        cpl_error * error = cpl_error_fill(function, lcode, file, line);

        if (text != NULL && text[0] != '\0' &&
            (text[0] != ' ' || text[1] != '\0')) {
            /* The user supplied a message */

            /* Calling this function with text NULL or empty is supported,
               but causes a compiler warning on some systems. To support
               calls that do not set a message, call with a single space
               causes that user message to be ignored. */

            /* Concatenate the standard message and the user supplied
               message */
            cx_assert( error != NULL );
            (void)cx_snprintf((cxchar *)error->msg,
                              (cxsize)CPL_ERROR_MAX_MESSAGE_LENGTH,
                              (const cxchar *)"%s: %s",
                              cpl_error_get_message_default(lcode), text);
        }
    }

#ifdef HAVE_LIBPTHREAD
    pthread_rwlock_unlock(&cpl_lock_error_read_only);
#endif

    return lcode;
}
GB_unop__carg_fp32_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__carg_fp32_fc32
// op(A') function:  GB_unop_tran__carg_fp32_fc32

// C type:   float
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = (aij)
// unaryop:  cij = cargf (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = cargf (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC32_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = (aij) ;          \
    Cx [pC] = cargf (z) ;           \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_CARG || GxB_NO_FP32 || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__carg_fp32_fc32
(
    float *Cx,              // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC32_t aij = Ax [p] ;
        GxB_FC32_t z = (aij) ;
        Cx [p] = cargf (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__carg_fp32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is generated by the shared template
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pmghosts.c
#include <string.h>
#include <math.h>
#include <mpi.h>

#include <fastpm/libfastpm.h>
#include <fastpm/logging.h>

#include "pmpfft.h"
#include "pmghosts.h"

#ifdef ENABLE_VALGRIND
#include </usr/include/valgrind/memcheck.h>
#endif

/* Callback invoked once per (particle, destination-rank) pair discovered by
 * pm_iter_ghosts; ppd carries the particle index (ipar) and target rank. */
typedef void (*pm_iter_ghosts_func)(PM * pm, PMGhostData * ppd, void * userdata);

/* Release all resources owned by a PMGhostData created by pm_ghosts_create. */
void pm_ghosts_free(PMGhostData * pgd)
{
    if(pgd->p) {
        fastpm_store_destroy(pgd->p);
        free(pgd->p);
    }
    fastpm_memory_free(pgd->pm->mem, pgd->ighost_to_ipar);

    free(pgd->Nsend);
    free(pgd->Osend);
    free(pgd->Nrecv);
    free(pgd->Orecv);
    free(pgd);
}

/* Walk every source particle, find each remote rank whose domain its
 * painting window [Below, Above] overlaps, and call iter_func once per
 * distinct (particle, rank) pair.  The window is expressed in grid units. */
static void pm_iter_ghosts(PM * pm, PMGhostData * pgd,
    pm_iter_ghosts_func iter_func, void * userdata)
{
    ptrdiff_t i;
    for (i = 0; i < pgd->source->np; i ++) {
        PMGhostData localppd = *pgd;
        double pos[3];
        int rank;
        fastpm_store_get_position(pgd->source, i, pos);

        int d;
        /* how far the window expands. */
        int left[3];
        int right[3];
        for(d = 0; d < 3; d ++) {
            /* this condition is not tightest for CIC painting, because
             * a particle touches a cell doesn't mean cic touches the left edge
             * of the cell.
             * */
            left[d] = floor(pos[d] * pm->InvCellSize[d] + pgd->Below[d]);
            right[d] = floor(pos[d] * pm->InvCellSize[d] + pgd->Above[d]);
        }

        /* probe neighbours */
        int j[3];
        /* NOTE(review): fixed-size de-duplication list; assumes a particle's
         * window never overlaps more than 1000 distinct ranks. */
        int ranks[1000];
        int used = 0;
        localppd.ipar = i;
        /* no need to run the z loop because the decomposition is in xy;
         * presumably every j[2] maps to the same rank -- the duplicate-rank
         * check below makes the extra iterations harmless. */
        for(j[2] = left[2]; j[2] <= right[2]; j[2] ++)
        for(j[0] = left[0]; j[0] <= right[0]; j[0] ++)
        for(j[1] = left[1]; j[1] <= right[1]; j[1] ++) {
            rank = pm_ipos_to_rank(pm, j);
            if(LIKELY(rank == pm->ThisTask)) continue;
            int ptr;
            /* skip ranks already visited for this particle */
            for(ptr = 0; ptr < used; ptr++) {
                if(rank == ranks[ptr]) break;
            }
            if(UNLIKELY(ptr == used)) {
                ranks[used++] = rank;
                localppd.rank = rank;
                localppd.reason = j;
                iter_func(pm, &localppd, userdata);
            }
        }
    }
}

/* Iterator callback: tally how many ghosts go to each rank. */
static void count_ghosts(PM * pm, PMGhostData * pgd, void * userdata)
{
#pragma omp atomic
    pgd->Nsend[pgd->rank] ++;
}

/* Iterator callback: pack one ghost particle into the send buffer.
 * Nsend is reused as a per-rank write cursor (it was reset to zero before
 * this pass), so Osend[rank] + offset addresses the ghost's buffer slot.
 * Also records the ghost -> source-particle mapping for later reduction. */
static void build_ghost_buffer(PM * pm, PMGhostData * pgd, void * userdata)
{
    FastPMPackingPlan * plan = userdata;

    int ighost;
    int offset;

#pragma omp atomic capture
    offset = pgd->Nsend[pgd->rank] ++;

    ighost = pgd->Osend[pgd->rank] + offset;

    fastpm_packing_plan_pack(plan, pgd->source, pgd->ipar,
            (char*) pgd->send_buffer + ighost * plan->elsize);

    pgd->ighost_to_ipar[ighost] = pgd->ipar;
}

/* create ghosts that can hold 'attributes';
 * use pm_ghosts_send to send subsets of `attributes`;
 * */
PMGhostData *
pm_ghosts_create(PM * pm, FastPMStore * p,
        FastPMColumnTags attributes,
        int support)
{
    /* The support of CIC is 2. We do not use
     * -1.0000 * pm->CellSize[d] here
     * because even though the kernel touches -1 * cellsize,
     * we do not paint on the lower edge.
     * */
    double Below[3]; /* in grid integer units */
    double Above[3];

    int d;
    for(d = 0; d < 3; d ++) {
        Below[d] = - (support * 0.5 - 1);
        Above[d] = (support * 0.5 );
    }
    return pm_ghosts_create_full(pm, p, attributes, Below, Above);
}

/* Full-control variant of pm_ghosts_create: the caller supplies the window
 * [below, above] in grid units.  Counts ghosts, exchanges counts with
 * MPI_Alltoall, and allocates the ghost store (pgd->p) and the
 * ghost -> parent-index map.  No particle data is moved yet; that is done
 * by pm_ghosts_send. */
PMGhostData *
pm_ghosts_create_full(PM * pm, FastPMStore * p,
        FastPMColumnTags attributes,
        double below[],
        double above[]
    )
{
    PMGhostData * pgd = malloc(sizeof(pgd[0]));

    pgd->pm = pm;
    pgd->source = p;

    int d;
    for(d = 0; d < 3; d++) {
        pgd->Below[d] = below[d];
        pgd->Above[d] = above[d];
    }

    pgd->ighost_to_ipar = NULL;

    pgd->Nsend = calloc(pm->NTask, sizeof(int));
    pgd->Osend = calloc(pm->NTask, sizeof(int));
    pgd->Nrecv = calloc(pm->NTask, sizeof(int));
    pgd->Orecv = calloc(pm->NTask, sizeof(int));

    size_t Nsend;
    size_t Nrecv;

    /* first pass: count ghosts per destination rank */
    memset(pgd->Nsend, 0, sizeof(pgd->Nsend[0]) * pm->NTask);

    pm_iter_ghosts(pm, pgd, count_ghosts, NULL);

    /* cumsum fills Osend with per-rank offsets and returns the total */
    Nsend = cumsum(pgd->Osend, pgd->Nsend, pm->NTask);

    MPI_Alltoall(pgd->Nsend, 1, MPI_INT, pgd->Nrecv, 1, MPI_INT, pm->Comm2D);
    Nrecv = cumsum(pgd->Orecv, pgd->Nrecv, pm->NTask);

    double nmin, nmax, nmean, nstd;
    MPIU_stats(pm->Comm2D, Nsend, "<->s", &nmin, &nmean, &nmax, &nstd);

    fastpm_info("Sending ghosts: min = %g max = %g mean = %g std = %g\n",
            nmin, nmax, nmean, nstd);

    MPIU_stats(pm->Comm2D, Nrecv, "<->s", &nmin, &nmean, &nmax, &nstd);

    fastpm_info("Receiving ghosts: min = %g max = %g mean = %g std = %g\n",
            nmin, nmax, nmean, nstd);

    pgd->ighost_to_ipar = fastpm_memory_alloc(pm->mem, "Ghost2Par",
                Nsend * sizeof(int), FASTPM_MEMORY_HEAP);

    pgd->p = malloc(sizeof(pgd->p[0]));
    fastpm_store_init(pgd->p, pgd->source->name, Nrecv, attributes,
            FASTPM_MEMORY_HEAP);
    /* the ghost store inherits the source's metadata */
    memcpy(&pgd->p->meta, &pgd->source->meta, sizeof(pgd->source->meta));

    return pgd;
}

/* Set has_ghosts[i] = 1 for every source particle i that produced at least
 * one ghost on a remote rank; 0 otherwise.  has_ghosts must have
 * pgd->source->np entries. */
void
pm_ghosts_has_ghosts(PMGhostData * pgd, uint8_t * has_ghosts)
{
    size_t Nsend = cumsum(NULL, pgd->Nsend, pgd->pm->NTask);
    ptrdiff_t i;
    for(i = 0; i < pgd->source->np; i ++) {
        has_ghosts[i] = 0;
    }
    for(i = 0; i < Nsend; i ++) {
        has_ghosts[pgd->ighost_to_ipar[i]] = 1;
    }
}

/* Pack the requested attribute columns of every ghost source particle,
 * exchange them with an all-to-all, and unpack into the ghost store pgd->p.
 * attributes must be a subset of those passed to pm_ghosts_create. */
void
pm_ghosts_send(PMGhostData * pgd, FastPMColumnTags attributes)
{
    PM * pm = pgd->pm;
    ptrdiff_t i;
    size_t Nsend;
    size_t Nrecv;

    Nsend = cumsum(pgd->Osend, pgd->Nsend, pm->NTask);
    Nrecv = cumsum(pgd->Orecv, pgd->Nrecv, pm->NTask);

    FastPMPackingPlan plan[1];

    fastpm_packing_plan_init(plan, pgd->p, attributes);

    pgd->send_buffer = fastpm_memory_alloc(pm->mem, "SendBuf",
            Nsend * plan->elsize, FASTPM_MEMORY_STACK);
    pgd->recv_buffer = fastpm_memory_alloc(pm->mem, "RecvBuf",
            Nrecv * plan->elsize, FASTPM_MEMORY_STACK);

    /* build buffer; Nsend is zeroed so build_ghost_buffer can reuse it
     * as a per-rank cursor, then restored by the iteration itself */
    memset(pgd->Nsend, 0, sizeof(pgd->Nsend[0]) * pm->NTask);

    pm_iter_ghosts(pm, pgd, build_ghost_buffer, plan);

    /* exchange */

    pgd->p->np = Nrecv;

    MPI_Datatype GHOST_TYPE;
    MPI_Type_contiguous(plan->elsize, MPI_BYTE, &GHOST_TYPE);
    MPI_Type_commit(&GHOST_TYPE);
    MPI_Alltoallv_sparse(pgd->send_buffer, pgd->Nsend, pgd->Osend, GHOST_TYPE,
                  pgd->recv_buffer, pgd->Nrecv, pgd->Orecv, GHOST_TYPE,
                  pm->Comm2D);
    MPI_Type_free(&GHOST_TYPE);

#pragma omp parallel for
    for(i = 0; i < Nrecv; i ++) {
        fastpm_packing_plan_unpack(plan, pgd->p, i,
            (char*) pgd->recv_buffer + i * plan->elsize);
    }

    fastpm_memory_free(pm->mem, pgd->recv_buffer);
    fastpm_memory_free(pm->mem, pgd->send_buffer);
}

/* Reverse path of pm_ghosts_send: ship one attribute column of the ghosts
 * back to their home ranks and fold each ghost's value into its parent
 * particle via the caller-supplied reduce() function.  Note the buffers and
 * the Nrecv/Nsend roles are deliberately swapped relative to
 * pm_ghosts_send, since data flows in the opposite direction. */
void pm_ghosts_reduce(PMGhostData * pgd,
        FastPMColumnTags attribute,
        reduce_func reduce,
        void * userdata
        )
{
    int ci = fastpm_store_find_column_id(pgd->p, attribute);

    PM * pm = pgd->pm;

    size_t Nsend = cumsum(NULL, pgd->Nsend, pm->NTask);
    size_t Nrecv = cumsum(NULL, pgd->Nrecv, pm->NTask);

    ptrdiff_t i;

    size_t elsize;

    elsize = pgd->p->_column_info[ci].elsize;

    pgd->recv_buffer = fastpm_memory_alloc(pm->mem, "RecvBuf",
            Nrecv * elsize, FASTPM_MEMORY_STACK);
    pgd->send_buffer = fastpm_memory_alloc(pm->mem, "SendBuf",
            Nsend * elsize, FASTPM_MEMORY_STACK);

    /* pack the ghost values held locally (these were received earlier) */
#pragma omp parallel for
    for(i = 0; i < pgd->p->np; i ++) {
        pgd->p->_column_info[ci].pack(pgd->p, i, ci,
                (char*) pgd->recv_buffer + i * elsize);
    }

    MPI_Datatype GHOST_TYPE;
    MPI_Type_contiguous(elsize, MPI_BYTE, &GHOST_TYPE);
    MPI_Type_commit(&GHOST_TYPE);
    /* send with (Nrecv, Orecv) and receive with (Nsend, Osend): the
     * reduction travels ghosts -> owners */
    MPI_Alltoallv_sparse(pgd->recv_buffer, pgd->Nrecv, pgd->Orecv, GHOST_TYPE,
                  pgd->send_buffer, pgd->Nsend, pgd->Osend, GHOST_TYPE,
                  pm->Comm2D);
    MPI_Type_free(&GHOST_TYPE);

    /* scratch store used to unpack the returned ghost values */
    FastPMStore q[1];
    fastpm_store_init(q, pgd->p->name, Nsend, attribute, FASTPM_MEMORY_HEAP);

    /* now reduce the attributes. */
    int ighost;

    /* this loop is not parallel because multiple ghosts can be for the same ipar,
     * in which case we have a race condition.
     * we can fix this by carefully working with ipar (it should / could be made sorted)
     * but unlikely worth the effort.
     * */
    for(ighost = 0; ighost < Nsend; ighost ++) {
        pgd->p->_column_info[ci].unpack(q, ighost, ci,
            (char*) pgd->send_buffer + ighost * elsize);
    }
    for(ighost = 0; ighost < Nsend; ighost ++) {
        reduce(q, ighost, pgd->source, pgd->ighost_to_ipar[ighost], ci, userdata);
    }
    fastpm_store_destroy(q);
    fastpm_memory_free(pm->mem, pgd->send_buffer);
    fastpm_memory_free(pm->mem, pgd->recv_buffer);
}
GB_binop__first_bool.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__first_bool
// A.*B function (eWiseMult):       GB_AemultB__first_bool
// A*D function (colscale):         GB_AxD__first_bool
// D*A function (rowscale):         GB_DxB__first_bool
// C+=B function (dense accum):     GB_Cdense_accumB__first_bool
// C+=b function (dense accum):     GB_Cdense_accumb__first_bool
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__first_bool
// C=scalar+B                       GB_bind1st__first_bool
// C=scalar+B'                      GB_bind1st_tran__first_bool
// C=A+scalar                       (none)
// C=A'+scalar                      (none)

// C type:   bool
// A type:   bool
// B,b type: bool
// BinaryOp: cij = aij

// NOTE: the FIRST operator returns its first argument, so B's values are
// never read -- hence the empty GB_GETB and the bind2nd variants being
// (none) / #if 0'd out below.

#define GB_ATYPE \
    bool

#define GB_BTYPE \
    bool

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    bool aij = Ax [pA]

// bij = Bx [pB] (unused for FIRST: B's value is never read)
#define GB_GETB(bij,Bx,pB)  \
    ;

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: z = FIRST (x, y) = x
#define GB_BINOP(z, x, y)   \
    z = x ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FIRST || GxB_NO_BOOL || GxB_NO_FIRST_BOOL)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// FIRST is not in that list, so this variant is compiled out.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__first_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// The template is #if 0'd out for FIRST; this function is a no-op stub
// that only reports success (or GrB_NO_VALUE when disabled).

GrB_Info GB_Cdense_accumB__first_bool
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// As above: the template is #if 0'd out for FIRST; stub only.

GrB_Info GB_Cdense_accumb__first_bool
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type bool
        bool bwork = (*((bool *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__first_bool
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__first_bool
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__first_bool
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__first_bool
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// For FIRST, op(x,bij) = x, so every entry of Cx is set to the scalar x and
// Bx is never dereferenced (the two empty ';' statements are the expanded
// GB_GETB and cast, both no-ops here).

GrB_Info GB_bind1st__first_bool
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    bool   x = (*((bool *) x_input)) ;
    bool *Bx = (bool *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        ;
        ;
        Cx [p] = x ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0

// For FIRST, bind2nd reduces to a plain copy of A, so no specialized kernel
// is generated and this stub is compiled out.

GrB_Info (none)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    bool *Ax = (bool *) Ax_input ;
    bool   y = (*((bool *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        bool aij = Ax [p] ;
        Cx [p] = aij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    ;                      \
    ;                      \
    Cx [pC] = x ;          \
}

GrB_Info GB_bind1st_tran__first_bool
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    bool
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool x = (*((const bool *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent use (same type here)
    #undef  GB_ATYPE
    #define GB_ATYPE \
    bool
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    bool aij = Ax [pA] ;   \
    Cx [pC] = aij ;        \
}

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool y = (*((const bool *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
bcnn_mat.c
/* * Copyright (c) 2016-present Jean-Noel Braun. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include "bcnn_mat.h" #include <math.h> #include <bh/bh_log.h> #include <bh/bh_macros.h> #include <bh/bh_mem.h> #include "bcnn/bcnn.h" #include <bh/bh_timer.h> #if (defined(__aarch64__)) #include "openblas/openblas_sgemm.h" #endif int bcnn_fill_f32(int n, float a, float *x) { int i; for (i = 0; i < n; ++i) { x[i] = a; } return 0; } int bcnn_copy_f32(int n, float *x, float *y) { memcpy(y, x, n * sizeof(float)); return 0; } int bcnn_axpy(int n, float a, float *x, float *y) { #ifndef BCNN_USE_AVX int i; for (i = 0; i < n; ++i) y[i] += a * x[i]; #else int i, nd, nm; __m256 sum0; __m256 sum1; __m256 reg0, reg1, reg2, reg3; __m256 areg = _mm256_set1_ps(a); __m256 prod; int data_is_aligned = bh_is_aligned32(x) & bh_is_aligned32(y); nd = n / 16 * 16; nm = n % 16; if (data_is_aligned) { for (i = 0; i < nd; i += 16) { reg0 = _mm256_load_ps(x + 0); reg1 = _mm256_load_ps(x + 8); reg2 = _mm256_load_ps(y + 0); reg3 = _mm256_load_ps(y + 8); prod = _mm256_mul_ps(reg0, areg); sum0 = _mm256_add_ps(prod, reg2); prod = _mm256_mul_ps(reg1, areg); sum1 = _mm256_add_ps(prod, reg3); _mm256_store_ps(y + 0, sum0); _mm256_store_ps(y + 8, sum1); x += 16; y += 16; } } else { for (i = 0; i < nd; i += 16) { reg0 = _mm256_loadu_ps(x + 0); reg1 = _mm256_loadu_ps(x + 8); reg2 = _mm256_loadu_ps(y + 0); reg3 = _mm256_loadu_ps(y + 8); prod = _mm256_mul_ps(reg0, areg); sum0 = _mm256_add_ps(prod, reg2); prod = _mm256_mul_ps(reg1, areg); sum1 = _mm256_add_ps(prod, reg3); _mm256_storeu_ps(y + 0, sum0); _mm256_storeu_ps(y + 8, sum1); x += 16; y += 16; } } for (i = 0; i < nm; ++i) y[i] += a * x[i]; #endif return 0; } int bcnn_axpby(int n, float a, float *x, float b, float *y) { #ifndef BCNN_USE_AVX int i; for (i = 0; i < n; ++i) y[i] = a * x[i] + b * y[i]; #else int i, nd, nm; __m256 sum0; __m256 sum1; __m256 reg0, reg1, reg2, reg3; __m256 areg = _mm256_set1_ps(a); __m256 breg = _mm256_set1_ps(b); __m256 prod0, prod1; int data_is_aligned = bh_is_aligned32(x) & bh_is_aligned32(y); nd = n / 16 * 16; nm = n 
% 16; if (data_is_aligned) { for (i = 0; i < nd; i += 16) { reg0 = _mm256_load_ps(x + 0); reg1 = _mm256_load_ps(x + 8); reg2 = _mm256_load_ps(y + 0); reg3 = _mm256_load_ps(y + 8); prod0 = _mm256_mul_ps(reg0, areg); prod1 = _mm256_mul_ps(reg2, breg); sum0 = _mm256_add_ps(prod0, prod1); prod0 = _mm256_mul_ps(reg1, areg); prod1 = _mm256_mul_ps(reg3, breg); sum1 = _mm256_add_ps(prod0, prod1); _mm256_store_ps(y + 0, sum0); _mm256_store_ps(y + 8, sum1); x += 16; y += 16; } } else { for (i = 0; i < nd; i += 16) { reg0 = _mm256_loadu_ps(x + 0); reg1 = _mm256_loadu_ps(x + 8); reg2 = _mm256_loadu_ps(y + 0); reg3 = _mm256_loadu_ps(y + 8); prod0 = _mm256_mul_ps(reg0, areg); prod1 = _mm256_mul_ps(reg2, breg); sum0 = _mm256_add_ps(prod0, prod1); prod0 = _mm256_mul_ps(reg1, areg); prod1 = _mm256_mul_ps(reg3, breg); sum1 = _mm256_add_ps(prod0, prod1); _mm256_storeu_ps(y + 0, sum0); _mm256_storeu_ps(y + 8, sum1); x += 16; y += 16; } } for (i = 0; i < nm; ++i) y[i] = a * x[i] + b * y[i]; #endif return 0; } void bcnn_axpy_strided(int num_batches, float a, float *x, float *y, int stride[2], int x_dim[3], int y_dim[3], int min_dim[3]) { for (int n = 0; n < num_batches; ++n) { for (int k = 0; k < min_dim[0]; ++k) { for (int j = 0; j < min_dim[1]; ++j) { for (int i = 0; i < min_dim[2]; ++i) { int dst_ind = i * stride[0] + y_dim[2] * (j * stride[0] + y_dim[1] * (y_dim[0] * n + k)); int src1_ind = i * stride[1] + x_dim[2] * (j * stride[1] + x_dim[1] * (x_dim[0] * n + k)); y[dst_ind] += a * x[src1_ind]; } } } } } int bcnn_pow(int n, float *x, float a, float *y) { int i; for (i = 0; i < n; ++i) { y[i] = powf(x[i], a); } return 0; } int bcnn_vadd(int n, float *a, float *b, float *y) { #ifndef BCNN_USE_AVX int i; for (i = 0; i < n; ++i) { y[i] = a[i] + b[i]; } #else int i, nd, nm; __m128 r0, r1, r2, r3; nd = n / 8 * 8; nm = n % 8; for (i = 0; i < nd; i += 8) { r0 = _mm_loadu_ps(a); r1 = _mm_loadu_ps(a + 4); r2 = _mm_loadu_ps(b); r3 = _mm_loadu_ps(b + 4); r0 = _mm_add_ps(r0, r2); r1 = 
_mm_add_ps(r1, r3); _mm_storeu_ps(y, r0); _mm_storeu_ps(y + 4, r1); a += 8; b += 8; y += 8; } for (i = 0; i < nm; ++i) { y[i] = a[i] + b[i]; } #endif return 0; } int bcnn_vsub(int n, float *a, float *b, float *y) { #ifndef BCNN_USE_AVX int i; for (i = 0; i < n; ++i) { y[i] = a[i] - b[i]; } #else int i, nd, nm; __m128 r0, r1, r2, r3; nd = n / 8 * 8; nm = n % 8; for (i = 0; i < nd; i += 8) { r0 = _mm_loadu_ps(a); r1 = _mm_loadu_ps(a + 4); r2 = _mm_loadu_ps(b); r3 = _mm_loadu_ps(b + 4); r0 = _mm_sub_ps(r0, r2); r1 = _mm_sub_ps(r1, r3); _mm_storeu_ps(y, r0); _mm_storeu_ps(y + 4, r1); a += 8; b += 8; y += 8; } for (i = 0; i < nm; ++i) { y[i] = a[i] - b[i]; } #endif return 0; } int bcnn_vmul(int n, float *a, float *b, float *y) { #ifndef BCNN_USE_AVX int i; for (i = 0; i < n; ++i) { y[i] = a[i] * b[i]; } #else int i, nd, nm; __m128 r0, r1, r2, r3; nd = n / 8 * 8; nm = n % 8; for (i = 0; i < nd; i += 8) { r0 = _mm_loadu_ps(a); r1 = _mm_loadu_ps(a + 4); r2 = _mm_loadu_ps(b); r3 = _mm_loadu_ps(b + 4); r0 = _mm_mul_ps(r0, r2); r1 = _mm_mul_ps(r1, r3); _mm_storeu_ps(y, r0); _mm_storeu_ps(y + 4, r1); a += 8; b += 8; y += 8; } for (i = 0; i < nm; ++i) { y[i] = a[i] * b[i]; } #endif return 0; } int bcnn_vdiv(int n, float *a, float *b, float *y) { #ifndef BCNN_USE_AVX int i; for (i = 0; i < n; ++i) { if (bh_abs(b[i]) > 0.00001f) y[i] = a[i] / b[i]; else y[i] = 0.0f; } #else int i, nd, nm; __m128 r0, r1, r2, r3; nd = n / 8 * 8; nm = n % 8; for (i = 0; i < nd; i += 8) { r0 = _mm_loadu_ps(a); r1 = _mm_loadu_ps(a + 4); r2 = _mm_loadu_ps(b); r3 = _mm_loadu_ps(b + 4); r0 = _mm_div_ps(r0, r2); r1 = _mm_div_ps(r1, r3); _mm_storeu_ps(y, r0); _mm_storeu_ps(y + 4, r1); a += 8; b += 8; y += 8; } for (i = 0; i < nm; ++i) { if (bh_abs(b[i]) > 0.00001f) y[i] = a[i] / b[i]; else y[i] = 0.0f; } #endif return 0; } int bcnn_scal(int n, float a, float *x) { #ifndef BCNN_USE_AVX int i; if (a == 0.0f) { memset(x, 0, n * sizeof(float)); } else if (a != 1.0f) { for (i = 0; i < n; ++i) x[i] *= a; } #else 
int i, nd, nm; __m128 reg0, reg1; __m128 areg = _mm_set1_ps(a); __m128 prod; int data_is_aligned = bh_is_aligned32(x); if (a == 0.0f) { memset(x, 0, n * sizeof(float)); } else if (a != 1.0f) { nd = n / 8 * 8; nm = n % 8; if (data_is_aligned) { for (i = 0; i < nd; i += 8) { reg0 = _mm_load_ps(x + 0); reg1 = _mm_load_ps(x + 4); prod = _mm_mul_ps(reg0, areg); _mm_store_ps(x + 0, prod); prod = _mm_mul_ps(reg1, areg); _mm_store_ps(x + 4, prod); x += 8; } } else { for (i = 0; i < nd; i += 8) { reg0 = _mm_loadu_ps(x + 0); reg1 = _mm_loadu_ps(x + 4); prod = _mm_mul_ps(reg0, areg); _mm_storeu_ps(x + 0, prod); prod = _mm_mul_ps(reg1, areg); _mm_storeu_ps(x + 4, prod); x += 8; } } for (i = 0; i < nm; ++i) x[i] *= a; } #endif return 0; } int bcnn_add_scalar(int n, float a, float *x) { #ifndef BCNN_USE_AVX int i; for (i = 0; i < n; ++i) { x[i] += a; } #else int i, nd, nm; __m128 reg0, reg1; __m128 areg = _mm_set1_ps(a); __m128 prod; int data_is_aligned = bh_is_aligned32(x); if (a == 0.0f) { return 0; } else if (a != 1.0f) { nd = n / 8 * 8; nm = n % 8; if (data_is_aligned) { for (i = 0; i < nd; i += 8) { reg0 = _mm_load_ps(x + 0); reg1 = _mm_load_ps(x + 4); prod = _mm_add_ps(reg0, areg); _mm_store_ps(x + 0, prod); prod = _mm_add_ps(reg1, areg); _mm_store_ps(x + 4, prod); x += 8; } } else { for (i = 0; i < nd; i += 8) { reg0 = _mm_loadu_ps(x + 0); reg1 = _mm_loadu_ps(x + 4); prod = _mm_add_ps(reg0, areg); _mm_storeu_ps(x + 0, prod); prod = _mm_add_ps(reg1, areg); _mm_storeu_ps(x + 4, prod); x += 8; } } for (i = 0; i < nm; ++i) x[i] += a; } #endif return 0; } float bcnn_dot(int n, float *x, float *y) { #ifndef BCNN_USE_AVX int i; float dot = 0; for (i = 0; i < n; ++i) dot += x[i] * y[i]; return dot; #else int i, nd, nm; float sum = 0; float sum_res[4]; __m128 sum_r = _mm_setzero_ps(); __m128 r0, r1, r2, r3; nd = n / 8 * 8; nm = n % 8; for (i = 0; i < nd; i += 8) { r0 = _mm_loadu_ps(x); r1 = _mm_loadu_ps(y); r2 = _mm_loadu_ps(x + 4); r3 = _mm_loadu_ps(y + 4); r0 = _mm_mul_ps(r0, 
r1); r2 = _mm_mul_ps(r2, r3); sum_r = _mm_add_ps(sum_r, r0); sum_r = _mm_add_ps(sum_r, r2); x += 8; y += 8; } _mm_storeu_ps(sum_res, sum_r); sum += sum_res[0] + sum_res[1] + sum_res[2] + sum_res[3]; for (i = 0; i < nm; ++i) sum += x[i] * y[i]; return sum; #endif } int bcnn_vsum(int n, float *x, float *sum) { #ifndef BCNN_USE_AVX int i; float s = 0.0f; for (i = 0; i < n; ++i) s += x[i]; *(sum) = s; #else int i, nd, nm; float s = 0.0f; float sum_res[4]; __m128 sum_r = _mm_setzero_ps(); __m128 r0, r1; nd = n / 8 * 8; nm = n % 8; for (i = 0; i < nd; i += 8) { r0 = _mm_loadu_ps(x); r1 = _mm_loadu_ps(x + 4); sum_r = _mm_add_ps(sum_r, r0); sum_r = _mm_add_ps(sum_r, r1); x += 8; } _mm_storeu_ps(sum_res, sum_r); s += sum_res[0] + sum_res[1] + sum_res[2] + sum_res[3]; for (i = 0; i < nm; ++i) s += x[i]; *(sum) = s; #endif return 0; } int bcnn_gemv(int trans_a, int m, int n, float alpha, float *a, float *x, float beta, float *y) { int i, j; #ifdef BCNN_USE_AVX int nd, md; __m128 apart, mula, mul0, areg, xreg, yreg; float sum[4] = {0}; #endif if (!trans_a) { if (beta != 1.0f) { for (i = 0; i < m; ++i) { y[i] *= beta; } } #ifndef BCNN_USE_AVX for (i = 0; i < m; ++i) { for (j = 0; j < n; ++j) { y[i] += alpha * a[i * n + j] * x[j]; } } #else nd = n / 4 * 4; apart = _mm_set1_ps(alpha); for (i = 0; i < m; ++i) { memset(sum, 0, 4 * sizeof(float)); yreg = _mm_setzero_ps(); for (j = 0; j < nd; j += 4) { areg = _mm_loadu_ps(&a[i * n + j]); xreg = _mm_loadu_ps(&x[j]); mula = _mm_mul_ps(apart, areg); mul0 = _mm_mul_ps(xreg, mula); yreg = _mm_add_ps(yreg, mul0); } _mm_storeu_ps(sum, yreg); y[i] += sum[0] + sum[1] + sum[2] + sum[3]; for (; j < n; ++j) y[i] += alpha * a[i * n + j] * x[j]; } #endif } else { if (beta != 1.0f) { for (i = 0; i < n; ++i) { y[i] *= beta; } } #ifndef BCNN_USE_AVX for (i = 0; i < n; ++i) { for (j = 0; j < m; ++j) { y[i] += alpha * a[i * m + j] * x[j]; } } #else md = m / 4 * 4; apart = _mm_set1_ps(alpha); for (i = 0; i < n; ++i) { memset(sum, 0, 4 * sizeof(float)); 
        /* NOTE(review): tail of a function that begins before this chunk
         * (looks like an AVX gemv/sgemv accumulation over rows of `a`);
         * left byte-identical. */
        yreg = _mm_setzero_ps();
        for (j = 0; j < md; j += 4) {
            areg = _mm_loadu_ps(&a[i * m + j]);
            xreg = _mm_loadu_ps(&x[j]);
            mula = _mm_mul_ps(apart, areg);
            mul0 = _mm_mul_ps(xreg, mula);
            yreg = _mm_add_ps(yreg, mul0);
        }
        /* Horizontal sum of the 4 partial products, then scalar tail. */
        _mm_storeu_ps(sum, yreg);
        y[i] += sum[0] + sum[1] + sum[2] + sum[3];
        for (; j < m; ++j) y[i] += alpha * a[i * m + j] * x[j];
    }
#endif
    }
    return 0;
}

/* Squared L2 distance between x and y over n elements (no sqrt is taken).
 * AVX path processes 8 floats per iteration (two SSE lanes), using aligned
 * loads when both pointers are 32-byte aligned, then handles the n % 8
 * remainder with scalar code (x/y have been advanced past the SIMD part). */
float bcnn_l2_distance(float *x, float *y, int n) {
    float dist = 0.0f;
    int i;
#ifdef BCNN_USE_AVX
    int data_is_aligned = bh_is_aligned32(x) & bh_is_aligned32(y);
    __m128 vx0, vy0, vx1, vy1, vdiff0, vdiff1;
    __m128 vdist = _mm_set1_ps(0.0f);
    float dist4f[4] = {0.0f};
    int nd, nm;
    nd = n / 8 * 8; /* largest multiple of 8 <= n */
    nm = n % 8;     /* scalar remainder */
    if (data_is_aligned) {
        for (i = 0; i < nd; i += 8) {
            vx0 = _mm_load_ps(x);
            vy0 = _mm_load_ps(y);
            vx1 = _mm_load_ps(x + 4);
            vy1 = _mm_load_ps(y + 4);
            vdiff0 = _mm_sub_ps(vx0, vy0);
            vdiff0 = _mm_mul_ps(vdiff0, vdiff0);
            vdiff1 = _mm_sub_ps(vx1, vy1);
            vdiff1 = _mm_mul_ps(vdiff1, vdiff1);
            vdist = _mm_add_ps(vdist, vdiff0);
            vdist = _mm_add_ps(vdist, vdiff1);
            x += 8;
            y += 8;
        }
        _mm_store_ps(dist4f, vdist);
    } else {
        for (i = 0; i < nd; i += 8) {
            vx0 = _mm_loadu_ps(x);
            vy0 = _mm_loadu_ps(y);
            vx1 = _mm_loadu_ps(x + 4);
            vy1 = _mm_loadu_ps(y + 4);
            vdiff0 = _mm_sub_ps(vx0, vy0);
            vdiff0 = _mm_mul_ps(vdiff0, vdiff0);
            vdiff1 = _mm_sub_ps(vx1, vy1);
            vdiff1 = _mm_mul_ps(vdiff1, vdiff1);
            vdist = _mm_add_ps(vdist, vdiff0);
            vdist = _mm_add_ps(vdist, vdiff1);
            x += 8;
            y += 8;
        }
        _mm_storeu_ps(dist4f, vdist);
    }
    dist += dist4f[0] + dist4f[1] + dist4f[2] + dist4f[3];
    for (i = 0; i < nm; ++i) dist += (x[i] - y[i]) * (x[i] - y[i]);
#else
    for (i = 0; i < n; ++i) dist += (x[i] - y[i]) * (x[i] - y[i]);
#endif
    return dist;
}

/* Sum of squared differences between vector x and scalar a:
 * sum_i (x[i] - a)^2. Same 8-wide SSE + scalar-remainder scheme as above. */
float bcnn_sqrdiff_vs(float *x, float a, int n) {
    float dist = 0.0f;
    int i;
#ifndef BCNN_USE_AVX
    for (i = 0; i < n; ++i) dist += (x[i] - a) * (x[i] - a);
#else
    int data_is_aligned = bh_is_aligned32(x);
    __m128 vx0, vx1, vdiff0, vdiff1;
    __m128 vdist = _mm_set1_ps(0.0f);
    __m128 areg = _mm_set1_ps(a);
    float dist4f[4] = {0.0f};
    int nd, nm;
    nd = n / 8 * 8;
    nm = n % 8;
    if (data_is_aligned) {
        for (i = 0; i < nd; i += 8) {
            vx0 = _mm_load_ps(x);
            vx1 = _mm_load_ps(x + 4);
            vdiff0 = _mm_sub_ps(vx0, areg);
            vdiff0 = _mm_mul_ps(vdiff0, vdiff0);
            vdiff1 = _mm_sub_ps(vx1, areg);
            vdiff1 = _mm_mul_ps(vdiff1, vdiff1);
            vdist = _mm_add_ps(vdist, vdiff0);
            vdist = _mm_add_ps(vdist, vdiff1);
            x += 8;
        }
        _mm_store_ps(dist4f, vdist);
    } else {
        for (i = 0; i < nd; i += 8) {
            vx0 = _mm_loadu_ps(x);
            vx1 = _mm_loadu_ps(x + 4);
            vdiff0 = _mm_sub_ps(vx0, areg);
            vdiff0 = _mm_mul_ps(vdiff0, vdiff0);
            vdiff1 = _mm_sub_ps(vx1, areg);
            vdiff1 = _mm_mul_ps(vdiff1, vdiff1);
            vdist = _mm_add_ps(vdist, vdiff0);
            vdist = _mm_add_ps(vdist, vdiff1);
            x += 8;
        }
        _mm_storeu_ps(dist4f, vdist);
    }
    dist += dist4f[0] + dist4f[1] + dist4f[2] + dist4f[3];
    for (i = 0; i < nm; ++i) dist += (x[i] - a) * (x[i] - a);
#endif
    return dist;
}

/* Dot product of the shifted vectors (x - a) and (y - b):
 * sum_i (x[i] - a) * (y[i] - b). Unaligned SSE loads, 8 floats/iter. */
float bcnn_shiftdot(int n, float *x, float a, float *y, float b) {
#ifndef BCNN_USE_AVX
    int i;
    float dot = 0;
    for (i = 0; i < n; ++i) dot += (x[i] - a) * (y[i] - b);
    return dot;
#else
    int i, nd, nm;
    float sum = 0;
    float sum_res[4];
    __m128 sum_r = _mm_setzero_ps();
    __m128 r0, r1, r2, r3;
    __m128 areg = _mm_set1_ps(a);
    __m128 breg = _mm_set1_ps(b);
    nd = n / 8 * 8;
    nm = n % 8;
    for (i = 0; i < nd; i += 8) {
        r0 = _mm_loadu_ps(x);
        r1 = _mm_loadu_ps(y);
        r2 = _mm_loadu_ps(x + 4);
        r3 = _mm_loadu_ps(y + 4);
        r0 = _mm_sub_ps(r0, areg);
        r1 = _mm_sub_ps(r1, breg);
        r2 = _mm_sub_ps(r2, areg);
        r3 = _mm_sub_ps(r3, breg);
        r0 = _mm_mul_ps(r0, r1);
        r2 = _mm_mul_ps(r2, r3);
        sum_r = _mm_add_ps(sum_r, r0);
        sum_r = _mm_add_ps(sum_r, r2);
        x += 8;
        y += 8;
    }
    _mm_storeu_ps(sum_res, sum_r);
    sum += sum_res[0] + sum_res[1] + sum_res[2] + sum_res[3];
    for (i = 0; i < nm; ++i) sum += (x[i] - a) * (y[i] - b);
    return sum;
#endif
}

/* Variance-based normalization: y[i] *= c / (a[i] * sqrt(a[i]) + 1e-5).
 * The epsilon guards against division by zero. Always returns 0. */
int bcnn_varnorm(int n, float *a, float c, float *y) {
#ifndef BCNN_USE_AVX
    int i;
    for (i = 0; i < n; ++i) {
        y[i] *= c / (a[i] * sqrtf(a[i]) + 0.00001f);
    }
#else
    int i, nd, nm;
    __m128 r0, r1, reg0, reg1;
    __m128 creg = _mm_set1_ps(c);
    __m128 epsreg = _mm_set1_ps(0.00001f);
    nd = n / 8 * 8;
    nm = n % 8;
    for (i = 0; i < nd; i += 8) {
        reg0 = _mm_loadu_ps(y);
        reg1 = _mm_loadu_ps(y + 4);
        r0 = _mm_loadu_ps(a);
        r1 = _mm_loadu_ps(a + 4);
        r0 = _mm_mul_ps(
            reg0,
            _mm_div_ps(creg,
                       _mm_add_ps(_mm_mul_ps(r0, _mm_sqrt_ps(r0)), epsreg)));
        r1 = _mm_mul_ps(
            reg1,
            _mm_div_ps(creg,
                       _mm_add_ps(_mm_mul_ps(r1, _mm_sqrt_ps(r1)), epsreg)));
        _mm_storeu_ps(y, r0);
        _mm_storeu_ps(y + 4, r1);
        a += 8;
        y += 8;
    }
    for (i = 0; i < nm; ++i) {
        y[i] *= c / (a[i] * sqrtf(a[i]) + 0.00001f);
    }
#endif
    return 0;
}

/* In-place update var[i] = var[i] * a - m[i] * m[i].
 * Presumably converts E[x^2]-style accumulators to variance given means m
 * and scaling a -- TODO confirm against callers. Always returns 0. */
int bcnn_varmean(int n, float *m, float a, float *var) {
#ifndef BCNN_USE_AVX
    int i;
    for (i = 0; i < n; ++i) {
        var[i] = var[i] * a - m[i] * m[i];
    }
#else
    int i, nd, nm;
    __m128 r0, r1, reg0, reg1;
    __m128 areg = _mm_set1_ps(a);
    nd = n / 8 * 8;
    nm = n % 8;
    for (i = 0; i < nd; i += 8) {
        reg0 = _mm_loadu_ps(var);
        reg1 = _mm_loadu_ps(var + 4);
        r0 = _mm_loadu_ps(m);
        r1 = _mm_loadu_ps(m + 4);
        r0 = _mm_sub_ps(_mm_mul_ps(reg0, areg), _mm_mul_ps(r0, r0));
        r1 = _mm_sub_ps(_mm_mul_ps(reg1, areg), _mm_mul_ps(r1, r1));
        _mm_storeu_ps(var, r0);
        _mm_storeu_ps(var + 4, r1);
        m += 8;
        var += 8;
    }
    for (i = 0; i < nm; ++i) {
        var[i] = var[i] * a - m[i] * m[i];
    }
#endif
    return 0;
}

/* Adds bias[i] to every element of channel i, for each image in the batch.
 * Layout: output is batch x channels x spatial (NCHW with HW flattened).
 * Channels are processed in parallel with OpenMP. */
void bcnn_add_bias(float *output, float *bias, int batch_size, int num_channels,
                   int spatial_size, int num_threads) {
    for (int b = 0; b < batch_size; ++b) {
#pragma omp parallel for num_threads(num_threads)
        for (int i = 0; i < num_channels; ++i) {
            bcnn_add_scalar(spatial_size, bias[i], output + i * spatial_size);
        }
        output += num_channels * spatial_size;
    }
}

/* Multiplies every element of channel i by scales[i], per batch image.
 * Same layout and threading scheme as bcnn_add_bias. */
void bcnn_scales(float *output, float *scales, int batch_size, int num_channels,
                 int spatial_size, int num_threads) {
    for (int b = 0; b < batch_size; ++b) {
#pragma omp parallel for num_threads(num_threads)
        for (int i = 0; i < num_channels; ++i) {
            bcnn_scal(spatial_size, scales[i], output + i * spatial_size);
        }
        output += num_channels * spatial_size;
    }
}

/* Accumulates the gradient w.r.t. per-channel scales (batch-norm style):
 * scale_updates[f] += sum over batch and spatial of delta * x_norm. */
void bcnn_grad_scales(float *x_norm, float *delta, int batch, int n, int size,
                      float *scale_updates) {
    int i, b, f;
    for (f = 0; f < n; ++f) {
        float sum = 0;
        for (b = 0; b < batch; ++b) {
            for (i = 0; i < size; ++i) {
                int index = i + size * (f + n * b);
                sum += delta[index] * x_norm[index];
            }
        }
        scale_updates[f] += sum;
    }
}

/* Accumulates the bias gradient: grad_bias[i] += sum over batch and spatial
 * positions of the incoming gradient for channel i. */
void bcnn_grad_bias(float *grad_bias, float *grad_data, int batch_size,
                    int num_channels, int spatial_size) {
    int i, j, b;
    float *p = NULL;
    for (b = 0; b < batch_size; ++b) {
        for (i = 0; i < num_channels; ++i) {
            p = grad_data + spatial_size * (i + b * num_channels);
            for (j = 0; j < spatial_size; ++j) {
                grad_bias[i] += p[j];
            }
        }
    }
}

/* Branch-free range check: true iff 0 <= a < b. Relies on a negative `a`
 * becoming a huge unsigned value, so a single unsigned compare suffices. */
static inline int is_a_positive_and_inferior_to_b(int a, int b) {
    return (unsigned int)a < (unsigned int)b;
}

/* im2col: unrolls kernel_size x kernel_size patches of data_im (CHW) into
 * columns of data_col so that convolution becomes a matrix multiply.
 * Out-of-image positions (padding) are written as 0. */
void bcnn_im2col(const float *data_im, const int channels, const int height,
                 const int width, const int kernel_size, const int pad,
                 const int stride, float *data_col) {
    int channel, kernel_row, kernel_col, output_rows, output_cols, input_col,
        input_row, output_col;
    const int output_h = (height + 2 * pad - kernel_size) / stride + 1;
    const int output_w = (width + 2 * pad - kernel_size) / stride + 1;
    const int channel_size = height * width;
    for (channel = channels; channel--; data_im += channel_size) {
        for (kernel_row = 0; kernel_row < kernel_size; kernel_row++) {
            for (kernel_col = 0; kernel_col < kernel_size; kernel_col++) {
                input_row = -pad + kernel_row;
                for (output_rows = output_h; output_rows; output_rows--) {
                    if (!is_a_positive_and_inferior_to_b(input_row, height)) {
                        /* Whole row falls in the padding: emit zeros. */
                        for (output_cols = output_w; output_cols;
                             output_cols--) {
                            *(data_col++) = 0;
                        }
                    } else {
                        input_col = -pad + kernel_col;
                        for (output_col = output_w; output_col; output_col--) {
                            if (is_a_positive_and_inferior_to_b(input_col,
                                                                width)) {
                                *(data_col++) =
                                    data_im[input_row * width + input_col];
                            } else {
                                *(data_col++) = 0;
                            }
                            input_col += stride;
                        }
                    }
                    input_row += stride;
                }
            }
        }
    }
}

/* Multithreaded im2col specialized for stride == 1: each output row is a
 * contiguous run, so it is built with memset (padding) + one memcpy (valid
 * pixels) instead of a per-pixel loop. Rows are split into a zero left
 * margin (wleft), a copied middle (wmid) and a zero right margin (wright). */
static void bcnn_im2col_mt_st1(const float *data_im, const int channels,
                               const int height, const int width,
                               const int kernel_size, const int pad,
                               float *data_col, int num_threads) {
    int height_col = (height + 2 * pad - kernel_size) + 1;
    int width_col = (width + 2 * pad - kernel_size) + 1;
    int channels_col = channels * kernel_size * kernel_size;
#pragma omp parallel for num_threads(num_threads)
    for (int c = 0; c < channels_col; ++c) {
        int w_offset = c % kernel_size;
        int h_offset = (c / kernel_size) % kernel_size;
        int c_im = c / kernel_size / kernel_size;
        const int hc0 = h_offset - pad;
        const int wc0 = w_offset - pad;
        int wleft = bh_max(0, pad - w_offset);
        int wmid = bh_min(width_col, width + pad - w_offset) - wleft;
        int wright = bh_max(0, width_col - (width + pad - w_offset));
        /* Rows entirely inside the top padding. */
        for (int h = 0; h < pad - h_offset; ++h) {
            const int row_offset = (c * height_col + h) * width_col;
            memset(data_col + row_offset, 0, width_col * sizeof(float));
        }
        /* Rows overlapping the image. */
        for (int h = bh_max(0, pad - h_offset);
             h < bh_min(height_col, height + pad - h_offset); ++h) {
            int h_pad = h + hc0;
            const int row_offset = (c * height_col + h) * width_col;
            const int srow_offset = (c_im * height + h_pad) * width;
            memset(data_col + row_offset, 0, wleft * sizeof(float));
            memcpy(data_col + row_offset + wleft,
                   data_im + srow_offset + wleft + wc0, wmid * sizeof(float));
            memset(data_col + row_offset + wleft + wmid, 0,
                   wright * sizeof(float));
        }
        /* Rows entirely inside the bottom padding. */
        for (int h = height + pad - h_offset; h < height_col; ++h) {
            const int row_offset = (c * height_col + h) * width_col;
            memset(data_col + row_offset, 0, width_col * sizeof(float));
        }
    }
}

/* Multithreaded im2col. Dispatches to the memcpy-based fast path when
 * stride == 1, otherwise runs a generic per-pixel loop parallelized over
 * the unrolled channel dimension. */
void bcnn_im2col_mt(const float *data_im, const int channels, const int height,
                    const int width, const int kernel_size, const int pad,
                    const int stride, float *data_col, int num_threads) {
    int height_col = (height + 2 * pad - kernel_size) / stride + 1;
    int width_col = (width + 2 * pad - kernel_size) / stride + 1;
    int channels_col = channels * kernel_size * kernel_size;
    if (stride == 1) {
        bcnn_im2col_mt_st1(data_im, channels, height, width, kernel_size, pad,
                           data_col, num_threads);
    } else {
#pragma omp parallel for num_threads(num_threads)
        for (int c = 0; c < channels_col; ++c) {
            int w_offset = c % kernel_size;
            int h_offset = (c / kernel_size) % kernel_size;
            int c_im = c / kernel_size / kernel_size;
            const int hc0 = h_offset - pad;
            const int wc0 = w_offset - pad;
            for (int h = 0; h < height_col; ++h) {
                int h_pad = h * stride + hc0;
                const int row_offset = (c * height_col + h) * width_col;
                const int srow_offset = (c_im * height + h_pad) * width;
                for (int w = 0; w < width_col; ++w) {
                    int w_pad = w * stride + wc0;
                    /* Unsigned compare == bounds check incl. negatives. */
                    if ((((unsigned)h_pad) < ((unsigned)height)) &&
                        (((unsigned)w_pad) < ((unsigned)width)))
                        data_col[row_offset + w] = data_im[srow_offset + w_pad];
                    else {
                        data_col[row_offset + w] = 0.;
                    }
                }
            }
        }
    }
}

/* col2im: inverse scatter of bcnn_im2col -- accumulates the column buffer
 * back into the CHW image (used for convolution backward w.r.t. input).
 * data_im is zeroed first; padding positions are simply skipped. */
void bcnn_col2im(const float *data_col, const int channels, const int height,
                 const int width, const int kernel, const int pad,
                 const int stride, float *data_im) {
    int channel, kernel_row, kernel_col, output_rows, input_col, input_row,
        output_col;
    const int output_h = (height + 2 * pad - kernel) / stride + 1;
    const int output_w = (width + 2 * pad - kernel) / stride + 1;
    const int channel_size = height * width;
    bcnn_fill_f32(height * width * channels, 0.0f, data_im);
    for (channel = channels; channel--; data_im += channel_size) {
        for (kernel_row = 0; kernel_row < kernel; kernel_row++) {
            for (kernel_col = 0; kernel_col < kernel; kernel_col++) {
                input_row = -pad + kernel_row;
                for (output_rows = output_h; output_rows; output_rows--) {
                    if (!is_a_positive_and_inferior_to_b(input_row, height)) {
                        data_col += output_w; /* whole row is padding */
                    } else {
                        input_col = -pad + kernel_col;
                        for (output_col = output_w; output_col; output_col--) {
                            if (is_a_positive_and_inferior_to_b(input_col,
                                                                width)) {
                                data_im[input_row * width + input_col] +=
                                    *data_col;
                            }
                            data_col++;
                            input_col += stride;
                        }
                    }
                    input_row += stride;
                }
            }
        }
    }
}

/* Kernels for NC4HW4 layouts */
/* In NC4HW4, channels are packed 4 at a time: each "bias slot" z is a group
 * of 4 channels stored interleaved over num_planes spatial positions.
 * All functions below share that addressing: dst + (z * num_planes + p) * 4.
 * NOTE(review): the SIMD paths use aligned loads/stores
 * (_mm_load_ps / vld1q_f32) -- assumes 16-byte-aligned buffers; confirm
 * allocation guarantees. Loop counters are `int` compared against `size_t`
 * bounds (sign/width mismatch warning). src/alpha/slope are unused in the
 * plain bias variants (kept for the common LUT signature below). */

/* dst += bias, per 4-channel group. */
void bcnn_add_bias_nc4hw4(float *dst, const float *src, const float *bias,
                          const float *alpha, const float *slope,
                          size_t num_planes, size_t num_biases) {
#if defined(BCNN_USE_AVX)
    /* NOTE(review): mv is unused here (copy-paste from the ReLU variant). */
    __m128 mv = _mm_set1_ps(0.0f);
    for (int z = 0; z < num_biases; ++z) {
        __m128 biasv = _mm_load_ps(bias + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            __m128 dstv = _mm_add_ps(_mm_load_ps(dst_z + 4 * p), biasv);
            _mm_store_ps(dst_z + 4 * p, dstv);
        }
    }
#elif defined(BCNN_USE_NEON)
    /* NOTE(review): mv is unused here as well. */
    float32x4_t mv = vdupq_n_f32(0.0f);
    for (int z = 0; z < num_biases; ++z) {
        float32x4_t biasv = vld1q_f32(bias + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float32x4_t dstv = vaddq_f32(vld1q_f32(dst_z + 4 * p), biasv);
            vst1q_f32(dst_z + 4 * p, dstv);
        }
    }
#else
    for (int z = 0; z < num_biases; ++z) {
        float *dst_z = dst + num_planes * 4 * z;
        const float *bias_z = bias + 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float *dst_x = dst_z + 4 * p;
            for (int i = 0; i < 4; ++i) {
                dst_x[i] += bias_z[i];
            }
        }
    }
#endif
}

/* dst = max(dst + bias, 0): bias add fused with ReLU. */
void bcnn_add_bias_with_relu_nc4hw4(float *dst, const float *src,
                                    const float *bias, const float *alpha,
                                    const float *slope, size_t num_planes,
                                    size_t num_biases) {
#if defined(BCNN_USE_AVX)
    __m128 mv = _mm_set1_ps(0.0f);
    for (int z = 0; z < num_biases; ++z) {
        __m128 biasv = _mm_load_ps(bias + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            __m128 dstv = _mm_add_ps(_mm_load_ps(dst_z + 4 * p), biasv);
            dstv = _mm_max_ps(dstv, mv);
            _mm_store_ps(dst_z + 4 * p, dstv);
        }
    }
#elif defined(BCNN_USE_NEON)
    float32x4_t mv = vdupq_n_f32(0.0f);
    for (int z = 0; z < num_biases; ++z) {
        float32x4_t biasv = vld1q_f32(bias + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float32x4_t dstv = vaddq_f32(vld1q_f32(dst_z + 4 * p), biasv);
            dstv = vmaxq_f32(dstv, mv);
            vst1q_f32(dst_z + 4 * p, dstv);
        }
    }
#else
    for (int z = 0; z < num_biases; ++z) {
        float *dst_z = dst + num_planes * 4 * z;
        const float *bias_z = bias + 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float *dst_x = dst_z + 4 * p;
            for (int i = 0; i < 4; ++i) {
                dst_x[i] += bias_z[i];
                if (dst_x[i] < 0) {
                    dst_x[i] = 0;
                }
            }
        }
    }
#endif
}

/* dst = lrelu(dst + bias): bias add fused with leaky ReLU, fixed
 * negative slope 0.1 (max(v,0) + 0.1 * min(v,0) in the SIMD paths). */
void bcnn_add_bias_with_lrelu_nc4hw4(float *dst, const float *src,
                                     const float *bias, const float *alpha,
                                     const float *slope, size_t num_planes,
                                     size_t num_biases) {
#if defined(BCNN_USE_AVX)
    __m128 zerov = _mm_set1_ps(0.0f);
    __m128 slopenegv = _mm_set1_ps(0.1f);
    for (int z = 0; z < num_biases; ++z) {
        __m128 biasv = _mm_load_ps(bias + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            __m128 dstv = _mm_add_ps(_mm_load_ps(dst_z + 4 * p), biasv);
            __m128 dstv_pos = _mm_max_ps(dstv, zerov);
            __m128 dstv_neg = _mm_mul_ps(slopenegv, _mm_min_ps(dstv, zerov));
            dstv = _mm_add_ps(dstv_pos, dstv_neg);
            _mm_store_ps(dst_z + 4 * p, dstv);
        }
    }
#elif defined(BCNN_USE_NEON)
    float32x4_t zerov = vdupq_n_f32(0.0f);
    float32x4_t slopenegv = vdupq_n_f32(0.1f);
    for (int z = 0; z < num_biases; ++z) {
        float32x4_t biasv = vld1q_f32(bias + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float32x4_t dstv = vaddq_f32(vld1q_f32(dst_z + 4 * p), biasv);
            float32x4_t dstv_pos = vmaxq_f32(dstv, zerov);
            float32x4_t dstv_neg = vmulq_f32(slopenegv, vminq_f32(dstv, zerov));
            dstv = vaddq_f32(dstv_pos, dstv_neg);
            vst1q_f32(dst_z + 4 * p, dstv);
        }
    }
#else
    for (int z = 0; z < num_biases; ++z) {
        float *dst_z = dst + num_planes * 4 * z;
        const float *bias_z = bias + 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float *dst_x = dst_z + 4 * p;
            for (int i = 0; i < 4; ++i) {
                dst_x[i] += bias_z[i];
                dst_x[i] = (dst_x[i] > 0 ? dst_x[i] : 0.1f * dst_x[i]);
            }
        }
    }
#endif
}

/* dst = prelu(dst + bias): bias add fused with PReLU, per-channel learned
 * negative slope read from `slope` (4 values per group). */
void bcnn_add_bias_with_prelu_nc4hw4(float *dst, const float *src,
                                     const float *bias, const float *alpha,
                                     const float *slope, size_t num_planes,
                                     size_t num_biases) {
#if defined(BCNN_USE_AVX)
    __m128 zerov = _mm_set1_ps(0.0f);
    for (int z = 0; z < num_biases; ++z) {
        __m128 biasv = _mm_load_ps(bias + 4 * z);
        __m128 slopev = _mm_load_ps(slope + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            __m128 dstv = _mm_add_ps(_mm_load_ps(dst_z + 4 * p), biasv);
            __m128 dstv_pos = _mm_max_ps(dstv, zerov);
            __m128 dstv_neg = _mm_mul_ps(slopev, _mm_min_ps(dstv, zerov));
            dstv = _mm_add_ps(dstv_pos, dstv_neg);
            _mm_store_ps(dst_z + 4 * p, dstv);
        }
    }
#elif defined(BCNN_USE_NEON)
    float32x4_t zerov = vdupq_n_f32(0.0f);
    /* NOTE(review): slopenegv is unused here; slopev is what is applied. */
    float32x4_t slopenegv = vdupq_n_f32(0.1f);
    for (int z = 0; z < num_biases; ++z) {
        float32x4_t biasv = vld1q_f32(bias + 4 * z);
        float32x4_t slopev = vld1q_f32(slope + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float32x4_t dstv = vaddq_f32(vld1q_f32(dst_z + 4 * p), biasv);
            float32x4_t dstv_pos = vmaxq_f32(dstv, zerov);
            float32x4_t dstv_neg = vmulq_f32(slopev, vminq_f32(dstv, zerov));
            dstv = vaddq_f32(dstv_pos, dstv_neg);
            vst1q_f32(dst_z + 4 * p, dstv);
        }
    }
#else
    for (int z = 0; z < num_biases; ++z) {
        float *dst_z = dst + num_planes * 4 * z;
        const float *bias_z = bias + 4 * z;
        const float *slope_z = slope + 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float *dst_x = dst_z + 4 * p;
            for (int i = 0; i < 4; ++i) {
                dst_x[i] += bias_z[i];
                dst_x[i] = (dst_x[i] > 0 ? dst_x[i] : slope_z[i] * dst_x[i]);
            }
        }
    }
#endif
}

/* dst = src * alpha + bias (per 4-channel group), no activation.
 * Reads from src, writes to dst (may differ from the += variants above). */
void bcnn_scale_and_add_bias_nc4hw4(float *dst, const float *src,
                                    const float *bias, const float *alpha,
                                    const float *slope, size_t num_planes,
                                    size_t num_biases) {
#if defined(BCNN_USE_AVX)
    /* NOTE(review): zerov is unused here (no activation in this variant). */
    __m128 zerov = _mm_set1_ps(0.0f);
    for (int z = 0; z < num_biases; ++z) {
        __m128 biasv = _mm_load_ps(bias + 4 * z);
        __m128 alphav = _mm_load_ps(alpha + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        const float *src_z = src + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            __m128 dstv = _mm_add_ps(
                _mm_mul_ps(_mm_load_ps(src_z + 4 * p), alphav), biasv);
            _mm_store_ps(dst_z + 4 * p, dstv);
        }
    }
#elif defined(BCNN_USE_NEON)
    float32x4_t zerov = vdupq_n_f32(0.0f);
    for (int z = 0; z < num_biases; ++z) {
        float32x4_t biasv = vld1q_f32(bias + 4 * z);
        float32x4_t alphav = vld1q_f32(alpha + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        const float *src_z = src + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float32x4_t dstv =
                vaddq_f32(vmulq_f32(vld1q_f32(src_z + 4 * p), alphav), biasv);
            vst1q_f32(dst_z + 4 * p, dstv);
        }
    }
#else
    for (int z = 0; z < num_biases; ++z) {
        float *dst_z = dst + num_planes * 4 * z;
        const float *src_z = src + num_planes * 4 * z;
        const float *bias_z = bias + 4 * z;
        const float *alpha_z = alpha + 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float *dst_x = dst_z + 4 * p;
            const float *src_x = src_z + 4 * p;
            for (int i = 0; i < 4; ++i) {
                dst_x[i] = src_x[i] * alpha_z[i] + bias_z[i];
            }
        }
    }
#endif
}

/* dst = max(src * alpha + bias, 0): scale + bias fused with ReLU. */
void bcnn_scale_and_add_bias_with_relu_nc4hw4(
    float *dst, const float *src, const float *bias, const float *alpha,
    const float *slope, size_t num_planes, size_t num_biases) {
#if defined(BCNN_USE_AVX)
    __m128 zerov = _mm_set1_ps(0.0f);
    for (int z = 0; z < num_biases; ++z) {
        __m128 biasv = _mm_load_ps(bias + 4 * z);
        __m128 alphav = _mm_load_ps(alpha + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        const float *src_z = src + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            __m128 dstv = _mm_add_ps(
                _mm_mul_ps(_mm_load_ps(src_z + 4 * p), alphav), biasv);
            dstv = _mm_max_ps(dstv, zerov);
            _mm_store_ps(dst_z + 4 * p, dstv);
        }
    }
#elif defined(BCNN_USE_NEON)
    float32x4_t zerov = vdupq_n_f32(0.0f);
    for (int z = 0; z < num_biases; ++z) {
        float32x4_t biasv = vld1q_f32(bias + 4 * z);
        float32x4_t alphav = vld1q_f32(alpha + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        const float *src_z = src + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float32x4_t dstv =
                vaddq_f32(vmulq_f32(vld1q_f32(src_z + 4 * p), alphav), biasv);
            dstv = vmaxq_f32(dstv, zerov);
            vst1q_f32(dst_z + 4 * p, dstv);
        }
    }
#else
    for (int z = 0; z < num_biases; ++z) {
        float *dst_z = dst + num_planes * 4 * z;
        const float *src_z = src + num_planes * 4 * z;
        const float *bias_z = bias + 4 * z;
        const float *alpha_z = alpha + 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float *dst_x = dst_z + 4 * p;
            const float *src_x = src_z + 4 * p;
            for (int i = 0; i < 4; ++i) {
                dst_x[i] = src_x[i] * alpha_z[i] + bias_z[i];
                dst_x[i] = (dst_x[i] > 0 ? dst_x[i] : 0.f);
            }
        }
    }
#endif
}

/* dst = lrelu(src * alpha + bias): scale + bias fused with leaky ReLU
 * (fixed 0.1 negative slope). */
void bcnn_scale_and_add_bias_with_lrelu_nc4hw4(
    float *dst, const float *src, const float *bias, const float *alpha,
    const float *slope, size_t num_planes, size_t num_biases) {
#if defined(BCNN_USE_AVX)
    __m128 zerov = _mm_set1_ps(0.0f);
    __m128 slopenegv = _mm_set1_ps(0.1f);
    for (int z = 0; z < num_biases; ++z) {
        __m128 biasv = _mm_load_ps(bias + 4 * z);
        __m128 alphav = _mm_load_ps(alpha + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        const float *src_z = src + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            __m128 dstv = _mm_add_ps(
                _mm_mul_ps(_mm_load_ps(src_z + 4 * p), alphav), biasv);
            __m128 dstv_pos = _mm_max_ps(dstv, zerov);
            __m128 dstv_neg = _mm_mul_ps(slopenegv, _mm_min_ps(dstv, zerov));
            dstv = _mm_add_ps(dstv_pos, dstv_neg);
            _mm_store_ps(dst_z + 4 * p, dstv);
        }
    }
#elif defined(BCNN_USE_NEON)
    float32x4_t zerov = vdupq_n_f32(0.0f);
    float32x4_t slopenegv = vdupq_n_f32(0.1f);
    for (int z = 0; z < num_biases; ++z) {
        float32x4_t biasv = vld1q_f32(bias + 4 * z);
        float32x4_t alphav = vld1q_f32(alpha + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        const float *src_z = src + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float32x4_t dstv =
                vaddq_f32(vmulq_f32(vld1q_f32(src_z + 4 * p), alphav), biasv);
            float32x4_t dstv_pos = vmaxq_f32(dstv, zerov);
            float32x4_t dstv_neg = vmulq_f32(slopenegv, vminq_f32(dstv, zerov));
            dstv = vaddq_f32(dstv_pos, dstv_neg);
            vst1q_f32(dst_z + 4 * p, dstv);
        }
    }
#else
    for (int z = 0; z < num_biases; ++z) {
        float *dst_z = dst + num_planes * 4 * z;
        const float *src_z = src + num_planes * 4 * z;
        const float *bias_z = bias + 4 * z;
        const float *alpha_z = alpha + 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float *dst_x = dst_z + 4 * p;
            const float *src_x = src_z + 4 * p;
            for (int i = 0; i < 4; ++i) {
                dst_x[i] = src_x[i] * alpha_z[i] + bias_z[i];
                dst_x[i] = (dst_x[i] > 0 ? dst_x[i] : 0.1f * dst_x[i]);
            }
        }
    }
#endif
}

/* dst = prelu(src * alpha + bias): scale + bias fused with PReLU
 * (per-channel negative slope from `slope`). */
void bcnn_scale_and_add_bias_with_prelu_nc4hw4(
    float *dst, const float *src, const float *bias, const float *alpha,
    const float *slope, size_t num_planes, size_t num_biases) {
#if defined(BCNN_USE_AVX)
    __m128 zerov = _mm_set1_ps(0.0f);
    for (int z = 0; z < num_biases; ++z) {
        __m128 biasv = _mm_load_ps(bias + 4 * z);
        __m128 alphav = _mm_load_ps(alpha + 4 * z);
        __m128 slopev = _mm_load_ps(slope + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        const float *src_z = src + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            __m128 dstv = _mm_add_ps(
                _mm_mul_ps(_mm_load_ps(src_z + 4 * p), alphav), biasv);
            __m128 dstv_pos = _mm_max_ps(dstv, zerov);
            __m128 dstv_neg = _mm_mul_ps(slopev, _mm_min_ps(dstv, zerov));
            dstv = _mm_add_ps(dstv_pos, dstv_neg);
            _mm_store_ps(dst_z + 4 * p, dstv);
        }
    }
#elif defined(BCNN_USE_NEON)
    float32x4_t zerov = vdupq_n_f32(0.0f);
    for (int z = 0; z < num_biases; ++z) {
        float32x4_t biasv = vld1q_f32(bias + 4 * z);
        float32x4_t alphav = vld1q_f32(alpha + 4 * z);
        float32x4_t slopev = vld1q_f32(slope + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        const float *src_z = src + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float32x4_t dstv =
                vaddq_f32(vmulq_f32(vld1q_f32(src_z + 4 * p), alphav), biasv);
            float32x4_t dstv_pos = vmaxq_f32(dstv, zerov);
            float32x4_t dstv_neg = vmulq_f32(slopev, vminq_f32(dstv, zerov));
            dstv = vaddq_f32(dstv_pos, dstv_neg);
            vst1q_f32(dst_z + 4 * p, dstv);
        }
    }
#else
    for (int z = 0; z < num_biases; ++z) {
        float *dst_z = dst + num_planes * 4 * z;
        const float *src_z = src + num_planes * 4 * z;
        const float *bias_z = bias + 4 * z;
        const float *alpha_z = alpha + 4 * z;
        const float *slope_z = slope + 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float *dst_x = dst_z + 4 * p;
            const float *src_x = src_z + 4 * p;
            for (int i = 0; i < 4; ++i) {
                dst_x[i] = src_x[i] * alpha_z[i] + bias_z[i];
                dst_x[i] = (dst_x[i] > 0 ? dst_x[i] : slope_z[i] * dst_x[i]);
            }
        }
    }
#endif
}

/* Look-up Table for the post convolution functions */
/* Index layout (from the ordering below): bit 2 selects scale+bias vs
 * plain bias, bits 0-1 select none/relu/lrelu/prelu -- TODO confirm
 * against the call sites that index this table. */
bcnn_post_conv_nc4hw4_func bcnn_post_conv_nc4hw4_lut[8] = {
    bcnn_add_bias_nc4hw4,
    bcnn_add_bias_with_relu_nc4hw4,
    bcnn_add_bias_with_lrelu_nc4hw4,
    bcnn_add_bias_with_prelu_nc4hw4,
    bcnn_scale_and_add_bias_nc4hw4,
    bcnn_scale_and_add_bias_with_relu_nc4hw4,
    bcnn_scale_and_add_bias_with_lrelu_nc4hw4,
    bcnn_scale_and_add_bias_with_prelu_nc4hw4};

/* Repacks NCHW data into NC4HW4: channels are grouped by 4 and interleaved
 * per spatial position. The destination is zero-filled first so that a
 * depth that is not a multiple of 4 leaves zero padding channels. */
void bcnn_nchw_to_nc4hw4(float *dst, const float *src, size_t area,
                         size_t depth, int batch_size) {
    int z, x;
    int cur = 0; /* linear read cursor into src, shared across batches */
    memset(dst, 0,
           batch_size * area * bh_div_up(depth, 4) * 4 * sizeof(float));
    for (int b = 0; b < batch_size; ++b) {
        float *dst_batch = dst + b * area * bh_div_up(depth, 4) * 4;
        for (z = 0; z < depth; ++z) {
            int plane = z / 4;
            float *dst_plane = dst_batch + plane * area * 4;
            int offset = z % 4;
            for (x = 0; x < area; ++x) {
                dst_plane[4 * x + offset] = src[cur++];
            }
        }
    }
}

/* Inverse of bcnn_nchw_to_nc4hw4: unpacks NC4HW4 back into dense NCHW. */
void bcnn_nc4hw4_to_nchw(float *dst, const float *src, size_t area,
                         size_t depth, int batch_size) {
    int x;
    int z;
    int cur = 0; /* linear write cursor into dst */
    for (int b = 0; b < batch_size; ++b) {
        const float *src_batch = src + b * area * bh_div_up(depth, 4) * 4;
        for (z = 0; z < depth; ++z) {
            int plane = z / 4;
            const float *src_plane = src_batch + plane * area * 4;
            int offset = z % 4;
            for (x = 0; x < area; ++x) {
                dst[cur++] = src_plane[4 * x + offset];
            }
        }
    }
}

/* Input transform of a 4x4 tile (of float4 elements) for the fast 3x3
 * convolution -- matches the Winograd F(2x2,3x3) input transform
 * B^T d B applied row-wise then column-wise (presumably; confirm against
 * the reference algorithm). `step` is the stride between output slots. */
void bcnn_conv3x3_convert_src(const float *src, float *dst, size_t step) {
    float *_x = (float *)src;
    float *_y = dst;
    /* Row pass: combine rows 0..3 of the 4x4 float4 tile. */
    bv_float4 m00 =
        bv_float4_sub(bv_float4_load(_x + 4 * 0), bv_float4_load(_x + 4 * 8));
    bv_float4 m01 =
        bv_float4_sub(bv_float4_load(_x + 4 * 1), bv_float4_load(_x + 4 * 9));
    bv_float4 m02 =
        bv_float4_sub(bv_float4_load(_x + 4 * 2), bv_float4_load(_x + 4 * 10));
    bv_float4 m03 =
        bv_float4_sub(bv_float4_load(_x + 4 * 3), bv_float4_load(_x + 4 * 11));
    bv_float4 m10 =
        bv_float4_add(bv_float4_load(_x + 4 * 4), bv_float4_load(_x + 4 * 8));
    bv_float4 m11 =
        bv_float4_add(bv_float4_load(_x + 4 * 5), bv_float4_load(_x + 4 * 9));
    bv_float4 m12 =
        bv_float4_add(bv_float4_load(_x + 4 * 6), bv_float4_load(_x + 4 * 10));
    bv_float4 m13 =
        bv_float4_add(bv_float4_load(_x + 4 * 7), bv_float4_load(_x + 4 * 11));
    bv_float4 m20 =
        bv_float4_sub(bv_float4_load(_x + 4 * 8), bv_float4_load(_x + 4 * 4));
    bv_float4 m21 =
        bv_float4_sub(bv_float4_load(_x + 4 * 9), bv_float4_load(_x + 4 * 5));
    bv_float4 m22 =
        bv_float4_sub(bv_float4_load(_x + 4 * 10), bv_float4_load(_x + 4 * 6));
    bv_float4 m23 =
        bv_float4_sub(bv_float4_load(_x + 4 * 11), bv_float4_load(_x + 4 * 7));
    bv_float4 m30 =
        bv_float4_sub(bv_float4_load(_x + 4 * 12), bv_float4_load(_x + 4 * 4));
    bv_float4 m31 =
        bv_float4_sub(bv_float4_load(_x + 4 * 13), bv_float4_load(_x + 4 * 5));
    bv_float4 m32 =
        bv_float4_sub(bv_float4_load(_x + 4 * 14), bv_float4_load(_x + 4 * 6));
    bv_float4 m33 =
        bv_float4_sub(bv_float4_load(_x + 4 * 15), bv_float4_load(_x + 4 * 7));
    /* Column pass: same combination applied within each row. */
    bv_float4_store(bv_float4_sub(m00, m02), _y + step * 0);
    bv_float4_store(bv_float4_add(m01, m02), _y + step * 1);
    bv_float4_store(bv_float4_sub(m02, m01), _y + step * 2);
    bv_float4_store(bv_float4_sub(m03, m01), _y + step * 3);
    bv_float4_store(bv_float4_sub(m10, m12), _y + step * 4);
    bv_float4_store(bv_float4_add(m11, m12), _y + step * 5);
    bv_float4_store(bv_float4_sub(m12, m11), _y + step * 6);
    bv_float4_store(bv_float4_sub(m13, m11), _y + step * 7);
    bv_float4_store(bv_float4_sub(m20, m22), _y + step * 8);
    bv_float4_store(bv_float4_add(m21, m22), _y + step * 9);
    bv_float4_store(bv_float4_sub(m22, m21), _y + step * 10);
    bv_float4_store(bv_float4_sub(m23, m21), _y + step * 11);
    bv_float4_store(bv_float4_sub(m30, m32), _y + step * 12);
    bv_float4_store(bv_float4_add(m31, m32), _y + step * 13);
    bv_float4_store(bv_float4_sub(m32, m31), _y + step * 14);
    bv_float4_store(bv_float4_sub(m33, m31), _y + step * 15);
}

/* Output transform: reduces a transformed 4x4 tile back to the 2x2 output
 * block (A^T m A of Winograd F(2x2,3x3), presumably -- see note above). */
void bcnn_conv3x3_convert_dst(const float *src_z, float *dst_block,
                              size_t step) {
    float *yy = dst_block;
    float *x = (float *)src_z;
    bv_float4 m00 =
        bv_float4_add(bv_float4_add(bv_float4_load(x + step * 0),
                                    bv_float4_load(x + step * 4)),
                      bv_float4_load(x + step * 8));
    bv_float4 m01 =
        bv_float4_add(bv_float4_add(bv_float4_load(x + step * 1),
                                    bv_float4_load(x + step * 5)),
                      bv_float4_load(x + step * 9));
    bv_float4 m02 =
        bv_float4_add(bv_float4_add(bv_float4_load(x + step * 2),
                                    bv_float4_load(x + step * 6)),
                      bv_float4_load(x + step * 10));
    bv_float4 m03 =
        bv_float4_add(bv_float4_add(bv_float4_load(x + step * 3),
                                    bv_float4_load(x + step * 7)),
                      bv_float4_load(x + step * 11));
    bv_float4 m10 =
        bv_float4_add(bv_float4_sub(bv_float4_load(x + step * 4),
                                    bv_float4_load(x + step * 8)),
                      bv_float4_load(x + step * 12));
    bv_float4 m11 =
        bv_float4_add(bv_float4_sub(bv_float4_load(x + step * 5),
                                    bv_float4_load(x + step * 9)),
                      bv_float4_load(x + step * 13));
    bv_float4 m12 =
        bv_float4_add(bv_float4_sub(bv_float4_load(x + step * 6),
                                    bv_float4_load(x + step * 10)),
                      bv_float4_load(x + step * 14));
    bv_float4 m13 =
        bv_float4_add(bv_float4_sub(bv_float4_load(x + step * 7),
                                    bv_float4_load(x + step * 11)),
                      bv_float4_load(x + step * 15));
    bv_float4_store(bv_float4_add(bv_float4_add(m00, m01), m02), yy + 4 * 0);
    bv_float4_store(bv_float4_add(bv_float4_sub(m01, m02), m03), yy + 4 * 1);
    bv_float4_store(bv_float4_add(bv_float4_add(m10, m11), m12), yy + 4 * 2);
    bv_float4_store(bv_float4_add(bv_float4_sub(m11, m12), m13), yy + 4 * 3);
}

/* Transforms 3x3 kernels into the 4x4 domain (G g G^T with the 0.5-based
 * transform matrix) and scatters them into the packed layout expected by
 * the fast 3x3 convolution: 16 transform slots, each a
 * srcDepthD4 x dstDepthD4 grid of 4x4 blocks indexed by (my, mx). */
void bcnn_conv3x3_convert_weights(const float *src_weights, float *dst_weights,
                                  int src_channels, int dst_channels) {
    float weight[CONV3x3_BLOCK_UNIT * CONV3x3_BLOCK_UNIT];
    int srcDepthD4 = bh_div_up(src_channels, 4);
    int dstDepthD4 = bh_div_up(dst_channels, 4);
    for (int dz = 0; dz < dst_channels; ++dz) {
        int dz_4 = dz / CONV3x3_BLOCK_UNIT;
        int mx = dz % CONV3x3_BLOCK_UNIT;
        float *dst_dz = dst_weights + dz_4 * srcDepthD4 * 16;
        for (int sz = 0; sz < src_channels; ++sz) {
            int sz_4 = sz / CONV3x3_BLOCK_UNIT;
            int my = sz % CONV3x3_BLOCK_UNIT;
            float *dst_sz = dst_dz + sz_4 * CONV3x3_BLOCK_UNIT *
                                         CONV3x3_BLOCK_UNIT;
            float *src = (float *)src_weights + 9 * (sz + dz * src_channels);
            float *dst = weight;
            float *k = (float *)src;
            /* Row transform: G * g (4x3 intermediate). */
            float m00 = k[0];
            float m01 = k[1];
            float m02 = k[2];
            float m10 = 0.500000 * k[0] + 0.500000 * k[3] + 0.500000 * k[6];
            float m11 = 0.500000 * k[1] + 0.500000 * k[4] + 0.500000 * k[7];
            float m12 = 0.500000 * k[2] + 0.500000 * k[5] + 0.500000 * k[8];
            float m20 = 0.500000 * k[0] + -0.500000 * k[3] + 0.500000 * k[6];
            float m21 = 0.500000 * k[1] + -0.500000 * k[4] + 0.500000 * k[7];
            float m22 = 0.500000 * k[2] + -0.500000 * k[5] + 0.500000 * k[8];
            float m30 = 0 + k[6];
            float m31 = 0 + k[7];
            float m32 = 0 + k[8];
            /* Column transform: (G * g) * G^T (4x4 result). */
            k = dst;
            k[0] = m00;
            k[1] = 0.500000 * m00 + 0.500000 * m01 + 0.500000 * m02;
            k[2] = 0.500000 * m00 + -0.500000 * m01 + 0.500000 * m02;
            k[3] = 0 + m02;
            k[4] = m10;
            k[5] = 0.500000 * m10 + 0.500000 * m11 + 0.500000 * m12;
            k[6] = 0.500000 * m10 + -0.500000 * m11 + 0.500000 * m12;
            k[7] = 0 + m12;
            k[8] = m20;
            k[9] = 0.500000 * m20 + 0.500000 * m21 + 0.500000 * m22;
            k[10] = 0.500000 * m20 + -0.500000 * m21 + 0.500000 * m22;
            k[11] = 0 + m22;
            k[12] = m30;
            k[13] = 0.500000 * m30 + 0.500000 * m31 + 0.500000 * m32;
            k[14] = 0.500000 * m30 + -0.500000 * m31 + 0.500000 * m32;
            k[15] = 0 + m32;
            /* Scatter the 16 transformed taps into the packed layout. */
            for (int ki = 0; ki < CONV3x3_BLOCK_UNIT * CONV3x3_BLOCK_UNIT;
                 ++ki) {
                float *dst_i = dst_sz + ki * srcDepthD4 * dstDepthD4 * 16;
                dst_i[4 * my + mx] = weight[ki];
            }
        }
    }
}

//#if defined(BCNN_USE_AVX)
/* 4x4 micro-kernel GEMM over NC4HW4 data (AVX path). The COMPUTE macro
 * broadcasts each lane of a src float4 and accumulates against 4 weight
 * rows. NOTE(review): this definition runs past the end of this chunk;
 * body below is the visible prefix, left untouched.
 * NOTE(review): srcValue[i] lane indexing on __m128 is a GCC/Clang
 * extension -- not portable to MSVC. */
static void bcnn_gemm_kernel4x4(float *dst, const float *src,
                                const float *weight, size_t src_depth_quad,
                                size_t dst_step, size_t dst_depth_quad,
                                size_t width, size_t weight_depth_offset) {
#if defined(BCNN_USE_AVX)
    int src_depth_step = 4 * width;
    int wC4 = width / 4;
    int w4End = wC4 * 4;
    for (int dz = 0; dz < dst_depth_quad; ++dz) {
        float *dst_z = dst + dz * dst_step;
        const float *weight_dz =
            weight + dz * (src_depth_quad * 16 + weight_depth_offset);
        for (int dx = 0; dx < wC4; ++dx) {
            float *dst_x = dst_z + dx * 4 * 4;
            __m128 dst0 = _mm_set1_ps(0.0f);
            __m128 dst1 = _mm_set1_ps(0.0f);
            __m128 dst2 = _mm_set1_ps(0.0f);
            __m128 dst3 = _mm_set1_ps(0.0f);
            const float *src_dx = src + 4 * dx * 4;
            for (int sz = 0; sz < src_depth_quad; ++sz) {
                const float *src_z = src_dx + sz * src_depth_step;
                const float *weight_z = weight_dz + sz * 16;
                __m128 w0 = _mm_loadu_ps(weight_z + 4 * 0);
                __m128 w1 = _mm_loadu_ps(weight_z + 4 * 1);
                __m128 w2 = _mm_loadu_ps(weight_z + 4 * 2);
                __m128 w3 = _mm_loadu_ps(weight_z + 4 * 3);
#define COMPUTE(v)                                    \
    {                                                 \
        __m128 srcValue = _mm_loadu_ps(src_z + 4 * v); \
        __m128 s0 = _mm_set1_ps(srcValue[0]);         \
        __m128 s1 = _mm_set1_ps(srcValue[1]);         \
        __m128 s2 = _mm_set1_ps(srcValue[2]);         \
        __m128 s3 = _mm_set1_ps(srcValue[3]);         \
        __m128 sw0 = _mm_mul_ps(s0, w0);              \
        __m128 sw1 = _mm_mul_ps(s1, w1);              \
        __m128 sw2 = _mm_mul_ps(s2, w2);              \
        __m128 sw3 = _mm_mul_ps(s3, w3);              \
        dst##v = _mm_add_ps(dst##v, sw0);             \
        dst##v = _mm_add_ps(dst##v, sw1);             \
        dst##v = _mm_add_ps(dst##v, sw2);             \
        dst##v = _mm_add_ps(dst##v, sw3);             \
    }
                COMPUTE(0);
                COMPUTE(1);
                COMPUTE(2);
                COMPUTE(3);
            }
            _mm_store_ps(dst_x + 4 * 0, dst0);
            _mm_store_ps(dst_x + 4 * 1, dst1);
            _mm_store_ps(dst_x + 4 * 2, dst2);
            _mm_store_ps(dst_x + 4 * 3, dst3);
        }
        /* Remainder columns (width not a multiple of 4). */
        for (int dx = w4End; dx < width; ++dx) {
            float *dst_x = dst_z + dx * 4;
            __m128 dstValue = _mm_set1_ps(0.0f);
            const float *src_dx = src + 4 * dx;
            for (int sz = 0; sz < src_depth_quad; ++sz) {
                const float *src_z = src_dx + sz * src_depth_step;
                const float *weight_z = weight_dz + sz * 16;
                __m128 w0 = _mm_loadu_ps(weight_z + 4 * 0);
                __m128 w1 = _mm_loadu_ps(weight_z + 4 * 1);
                __m128 w2 = _mm_loadu_ps(weight_z + 4 * 2);
                __m128 w3 = _mm_loadu_ps(weight_z + 4 * 3);
                __m128 srcValue = _mm_loadu_ps(src_z);
                __m128 s0 = _mm_set1_ps(srcValue[0]);
                __m128 s1 = _mm_set1_ps(srcValue[1]);
                __m128 s2 = _mm_set1_ps(srcValue[2]);
                __m128 s3 = _mm_set1_ps(srcValue[3]);
                __m128 sw0 = _mm_mul_ps(s0, w0);
                __m128 sw1 = _mm_mul_ps(s1, w1);
                __m128 sw2 = _mm_mul_ps(s2, w2);
                __m128 sw3 = _mm_mul_ps(s3, w3);
                dstValue = _mm_add_ps(dstValue, sw0);
                dstValue =
_mm_add_ps(dstValue, sw1); dstValue = _mm_add_ps(dstValue, sw2); dstValue = _mm_add_ps(dstValue, sw3); } _mm_store_ps(dst_x, dstValue); } } #elif defined(BCNN_USE_NEON) #if defined(__aarch64__) int src_z_step = 4 * width; int weight_z_step = 16 * src_depth_quad + weight_depth_offset; int x13 = src_depth_quad; int w8 = width / 8; int w8tail = (w8 * 8) / 4; int w4 = width / 4; int w4tail = w4 * 4; for (int dz = 0; dz < dst_depth_quad; ++dz) { float *dst_z = dst + dz * dst_step; const float *weight_dz = weight + dz * weight_z_step; for (int dx = 0; dx < w8; ++dx) { const float *src_dx = src + dx * 32; float *dst_x = dst_z + dx * 32; float32x4_t dst0 = vdupq_n_f32(0.0f); float32x4_t dst1 = vdupq_n_f32(0.0f); float32x4_t dst2 = vdupq_n_f32(0.0f); float32x4_t dst3 = vdupq_n_f32(0.0f); float32x4_t dst4 = vdupq_n_f32(0.0f); float32x4_t dst5 = vdupq_n_f32(0.0f); float32x4_t dst6 = vdupq_n_f32(0.0f); float32x4_t dst7 = vdupq_n_f32(0.0f); float32x4_t w0 = vld1q_f32(weight_dz + 4 * 0); float32x4_t w1 = vld1q_f32(weight_dz + 4 * 1); float32x4_t w2 = vld1q_f32(weight_dz + 4 * 2); float32x4_t w3 = vld1q_f32(weight_dz + 4 * 3); // dst0 / dst1 float32x4_t v0 = vld1q_f32(src_dx); dst0 = vmulq_n_f32(w0, v0[0]); float32x4_t v1 = vld1q_f32(src_dx + 4); dst0 = vfmaq_laneq_f32(dst0, w1, v0, 1); dst1 = vmulq_n_f32(w0, v1[0]); dst0 = vfmaq_laneq_f32(dst0, w2, v0, 2); dst1 = vfmaq_laneq_f32(dst1, w1, v1, 1); dst0 = vfmaq_laneq_f32(dst0, w3, v0, 3); dst1 = vfmaq_laneq_f32(dst1, w2, v1, 2); dst1 = vfmaq_laneq_f32(dst1, w3, v1, 3); // dst2 / dst3 v0 = vld1q_f32(src_dx + 8); dst2 = vmulq_n_f32(w0, v0[0]); v1 = vld1q_f32(src_dx + 12); dst2 = vfmaq_laneq_f32(dst2, w1, v0, 1); dst3 = vmulq_n_f32(w0, v1[0]); dst2 = vfmaq_laneq_f32(dst2, w2, v0, 2); dst3 = vfmaq_laneq_f32(dst3, w1, v1, 1); dst2 = vfmaq_laneq_f32(dst2, w3, v0, 3); dst3 = vfmaq_laneq_f32(dst3, w2, v1, 2); dst3 = vfmaq_laneq_f32(dst3, w3, v1, 3); // dst4 / dst5 v0 = vld1q_f32(src_dx + 16); dst4 = vmulq_n_f32(w0, v0[0]); v1 = 
vld1q_f32(src_dx + 20); dst4 = vfmaq_laneq_f32(dst4, w1, v0, 1); dst5 = vmulq_n_f32(w0, v1[0]); dst4 = vfmaq_laneq_f32(dst4, w2, v0, 2); dst5 = vfmaq_laneq_f32(dst5, w1, v1, 1); dst4 = vfmaq_laneq_f32(dst4, w3, v0, 3); dst5 = vfmaq_laneq_f32(dst5, w2, v1, 2); dst5 = vfmaq_laneq_f32(dst5, w3, v1, 3); // dst6 / dst7 v0 = vld1q_f32(src_dx + 24); dst6 = vmulq_n_f32(w0, v0[0]); v1 = vld1q_f32(src_dx + 28); dst6 = vfmaq_laneq_f32(dst6, w1, v0, 1); dst7 = vmulq_n_f32(w0, v1[0]); dst6 = vfmaq_laneq_f32(dst6, w2, v0, 2); dst7 = vfmaq_laneq_f32(dst7, w1, v1, 1); dst6 = vfmaq_laneq_f32(dst6, w3, v0, 3); dst7 = vfmaq_laneq_f32(dst7, w2, v1, 2); dst7 = vfmaq_laneq_f32(dst7, w3, v1, 3); for (int sz = 1; sz < src_depth_quad; ++sz) { const float *src_z = src_dx + sz * src_z_step; const float *weight_z = weight_dz + sz * 16; float32x4_t w0 = vld1q_f32(weight_z + 4 * 0); float32x4_t w1 = vld1q_f32(weight_z + 4 * 1); float32x4_t w2 = vld1q_f32(weight_z + 4 * 2); float32x4_t w3 = vld1q_f32(weight_z + 4 * 3); // dst0 / dst1 float32x4_t v0 = vld1q_f32(src_z); dst0 = vfmaq_laneq_f32(dst0, w0, v0, 0); float32x4_t v1 = vld1q_f32(src_z + 4); dst0 = vfmaq_laneq_f32(dst0, w1, v0, 1); dst1 = vfmaq_laneq_f32(dst1, w0, v1, 0); dst0 = vfmaq_laneq_f32(dst0, w2, v0, 2); dst1 = vfmaq_laneq_f32(dst1, w1, v1, 1); dst0 = vfmaq_laneq_f32(dst0, w3, v0, 3); dst1 = vfmaq_laneq_f32(dst1, w2, v1, 2); dst1 = vfmaq_laneq_f32(dst1, w3, v1, 3); // dst2 / dst3 v0 = vld1q_f32(src_z + 8); dst2 = vfmaq_laneq_f32(dst2, w0, v0, 0); v1 = vld1q_f32(src_z + 12); dst2 = vfmaq_laneq_f32(dst2, w1, v0, 1); dst3 = vfmaq_laneq_f32(dst3, w0, v1, 0); dst2 = vfmaq_laneq_f32(dst2, w2, v0, 2); dst3 = vfmaq_laneq_f32(dst3, w1, v1, 1); dst2 = vfmaq_laneq_f32(dst2, w3, v0, 3); dst3 = vfmaq_laneq_f32(dst3, w2, v1, 2); dst3 = vfmaq_laneq_f32(dst3, w3, v1, 3); // dst4 / dst5 v0 = vld1q_f32(src_z + 16); dst4 = vfmaq_laneq_f32(dst4, w0, v0, 0); v1 = vld1q_f32(src_z + 20); dst4 = vfmaq_laneq_f32(dst4, w1, v0, 1); dst5 = 
vfmaq_laneq_f32(dst5, w0, v1, 0); dst4 = vfmaq_laneq_f32(dst4, w2, v0, 2); dst5 = vfmaq_laneq_f32(dst5, w1, v1, 1); dst4 = vfmaq_laneq_f32(dst4, w3, v0, 3); dst5 = vfmaq_laneq_f32(dst5, w2, v1, 2); dst5 = vfmaq_laneq_f32(dst5, w3, v1, 3); // dst6 / dst7 v0 = vld1q_f32(src_z + 24); dst6 = vfmaq_laneq_f32(dst6, w0, v0, 0); v1 = vld1q_f32(src_z + 28); dst6 = vfmaq_laneq_f32(dst6, w1, v0, 1); dst7 = vfmaq_laneq_f32(dst7, w0, v1, 0); dst6 = vfmaq_laneq_f32(dst6, w2, v0, 2); dst7 = vfmaq_laneq_f32(dst7, w1, v1, 1); dst6 = vfmaq_laneq_f32(dst6, w3, v0, 3); dst7 = vfmaq_laneq_f32(dst7, w2, v1, 2); dst7 = vfmaq_laneq_f32(dst7, w3, v1, 3); } vst1q_f32(dst_x + 4 * 0, dst0); vst1q_f32(dst_x + 4 * 1, dst1); vst1q_f32(dst_x + 4 * 2, dst2); vst1q_f32(dst_x + 4 * 3, dst3); vst1q_f32(dst_x + 4 * 4, dst4); vst1q_f32(dst_x + 4 * 5, dst5); vst1q_f32(dst_x + 4 * 6, dst6); vst1q_f32(dst_x + 4 * 7, dst7); } for (int dx = w8tail; dx < w4; ++dx) { const float *src_dx = src + dx * 16; float *dst_x = dst_z + dx * 16; float32x4_t dst0 = vdupq_n_f32(0.0f); float32x4_t dst1 = vdupq_n_f32(0.0f); float32x4_t dst2 = vdupq_n_f32(0.0f); float32x4_t dst3 = vdupq_n_f32(0.0f); float32x4_t w0 = vld1q_f32(weight_dz + 4 * 0); float32x4_t w1 = vld1q_f32(weight_dz + 4 * 1); float32x4_t w2 = vld1q_f32(weight_dz + 4 * 2); float32x4_t w3 = vld1q_f32(weight_dz + 4 * 3); // start // dst0 / dst1 float32x4_t v0 = vld1q_f32(src_dx); dst0 = vmulq_n_f32(w0, v0[0]); float32x4_t v1 = vld1q_f32(src_dx + 4); dst0 = vfmaq_laneq_f32(dst0, w1, v0, 1); dst1 = vmulq_n_f32(w0, v1[0]); dst0 = vfmaq_laneq_f32(dst0, w2, v0, 2); dst1 = vfmaq_laneq_f32(dst1, w1, v1, 1); dst0 = vfmaq_laneq_f32(dst0, w3, v0, 3); dst1 = vfmaq_laneq_f32(dst1, w2, v1, 2); dst1 = vfmaq_laneq_f32(dst1, w3, v1, 3); // dst2 / dst3 v0 = vld1q_f32(src_dx + 8); dst2 = vmulq_n_f32(w0, v0[0]); v1 = vld1q_f32(src_dx + 12); dst2 = vfmaq_laneq_f32(dst2, w1, v0, 1); dst3 = vmulq_n_f32(w0, v1[0]); dst2 = vfmaq_laneq_f32(dst2, w2, v0, 2); dst3 = vfmaq_laneq_f32(dst3, 
w1, v1, 1); dst2 = vfmaq_laneq_f32(dst2, w3, v0, 3); dst3 = vfmaq_laneq_f32(dst3, w2, v1, 2); dst3 = vfmaq_laneq_f32(dst3, w3, v1, 3); for (int sz = 1; sz < src_depth_quad; ++sz) { const float *src_z = src_dx + sz * src_z_step; const float *weight_z = weight_dz + sz * 16; float32x4_t w0 = vld1q_f32(weight_z + 4 * 0); float32x4_t w1 = vld1q_f32(weight_z + 4 * 1); float32x4_t w2 = vld1q_f32(weight_z + 4 * 2); float32x4_t w3 = vld1q_f32(weight_z + 4 * 3); // dst0 / dst1 float32x4_t v0 = vld1q_f32(src_z); dst0 = vfmaq_laneq_f32(dst0, w0, v0, 0); float32x4_t v1 = vld1q_f32(src_z + 4); dst0 = vfmaq_laneq_f32(dst0, w1, v0, 1); dst1 = vfmaq_laneq_f32(dst1, w0, v1, 0); dst0 = vfmaq_laneq_f32(dst0, w2, v0, 2); dst1 = vfmaq_laneq_f32(dst1, w1, v1, 1); dst0 = vfmaq_laneq_f32(dst0, w3, v0, 3); dst1 = vfmaq_laneq_f32(dst1, w2, v1, 2); dst1 = vfmaq_laneq_f32(dst1, w3, v1, 3); // dst2 / dst3 v0 = vld1q_f32(src_z + 8); dst2 = vfmaq_laneq_f32(dst2, w0, v0, 0); v1 = vld1q_f32(src_z + 12); dst2 = vfmaq_laneq_f32(dst2, w1, v0, 1); dst3 = vfmaq_laneq_f32(dst3, w0, v1, 0); dst2 = vfmaq_laneq_f32(dst2, w2, v0, 2); dst3 = vfmaq_laneq_f32(dst3, w1, v1, 1); dst2 = vfmaq_laneq_f32(dst2, w3, v0, 3); dst3 = vfmaq_laneq_f32(dst3, w2, v1, 2); dst3 = vfmaq_laneq_f32(dst3, w3, v1, 3); } vst1q_f32(dst_x + 4 * 0, dst0); vst1q_f32(dst_x + 4 * 1, dst1); vst1q_f32(dst_x + 4 * 2, dst2); vst1q_f32(dst_x + 4 * 3, dst3); } for (int dx = w4tail; dx < width; ++dx) { float *dst_x = dst_z + dx * 4; const float *src_dx = src + dx * 4; float32x4_t dst0 = vdupq_n_f32(0.0f); float32x4_t dst1 = vdupq_n_f32(0.0f); float32x4_t w0 = vld1q_f32(weight_dz + 4 * 0); float32x4_t w1 = vld1q_f32(weight_dz + 4 * 1); float32x4_t w2 = vld1q_f32(weight_dz + 4 * 2); float32x4_t w3 = vld1q_f32(weight_dz + 4 * 3); float32x4_t v0 = vld1q_f32(src_dx); dst0 = vmulq_n_f32(w0, v0[0]); dst1 = vmulq_n_f32(w1, v0[1]); for (int sz = 1; sz < src_depth_quad; ++sz) { dst0 = vfmaq_laneq_f32(dst0, w2, v0, 2); dst1 = vfmaq_laneq_f32(dst1, w3, v0, 
3); const float *src_z = src_dx + sz * src_z_step; const float *weight_z = weight_dz + sz * 16; w0 = vld1q_f32(weight_z + 4 * 0); w1 = vld1q_f32(weight_z + 4 * 1); w2 = vld1q_f32(weight_z + 4 * 2); w3 = vld1q_f32(weight_z + 4 * 3); v0 = vld1q_f32(src_z); dst0 = vfmaq_laneq_f32(dst0, w0, v0, 0); dst1 = vfmaq_laneq_f32(dst1, w1, v0, 1); } dst0 = vfmaq_laneq_f32(dst0, w2, v0, 2); dst1 = vfmaq_laneq_f32(dst1, w3, v0, 3); dst0 = vaddq_f32(dst0, dst1); vst1q_f32(dst_x, dst0); } } #else // TODO int src_depth_step = 4 * width; int wC4 = width / 4; int w4End = wC4 * 4; for (int dz = 0; dz < dst_depth_quad; ++dz) { float *dst_z = dst + dz * dst_step; float *weight_dz = weight + dz * (src_depth_quad * 16 + weight_depth_offset); for (int dx = 0; dx < wC4; ++dx) { float *dst_x = dst_z + dx * 4 * 4; float32x4_t dst0 = vdupq_n_f32(0.0f); float32x4_t dst1 = vdupq_n_f32(0.0f); float32x4_t dst2 = vdupq_n_f32(0.0f); float32x4_t dst3 = vdupq_n_f32(0.0f); const float *src_dx = src + 4 * dx * 4; for (int sz = 0; sz < src_depth_quad; ++sz) { const float *src_z = src_dx + sz * src_depth_step; const float *weight_z = weight_dz + sz * 16; float32x4_t w0 = vld1q_f32(weight_z + 4 * 0); float32x4_t w1 = vld1q_f32(weight_z + 4 * 1); float32x4_t w2 = vld1q_f32(weight_z + 4 * 2); float32x4_t w3 = vld1q_f32(weight_z + 4 * 3); #define COMPUTE(v) \ { \ float32x4_t srcValue = vld1q_f32(src_z + 4 * v); \ float32x4_t s0 = vdupq_n_f32(srcValue[0]); \ float32x4_t s1 = vdupq_n_f32(srcValue[1]); \ float32x4_t s2 = vdupq_n_f32(srcValue[2]); \ float32x4_t s3 = vdupq_n_f32(srcValue[3]); \ float32x4_t sw0 = vmulq_f32(s0, w0); \ float32x4_t sw1 = vmulq_f32(s1, w1); \ float32x4_t sw2 = vmulq_f32(s2, w2); \ float32x4_t sw3 = vmulq_f32(s3, w3); \ dst##v = vaddq_f32(dst##v, sw0); \ dst##v = vaddq_f32(dst##v, sw1); \ dst##v = vaddq_f32(dst##v, sw2); \ dst##v = vaddq_f32(dst##v, sw3); \ } COMPUTE(0); COMPUTE(1); COMPUTE(2); COMPUTE(3); } vst1q_f32(dst_x + 4 * 0, dst0); vst1q_f32(dst_x + 4 * 1, dst1); vst1q_f32(dst_x 
+ 4 * 2, dst2); vst1q_f32(dst_x + 4 * 3, dst3); } for (int dx = w4End; dx < width; ++dx) { float *dst_x = dst_z + dx * 4; float32x4_t dstValue = vdupq_n_f32(0.0f); const float *src_dx = src + 4 * dx; for (int sz = 0; sz < src_depth_quad; ++sz) { const float *src_z = src_dx + sz * src_depth_step; const float *weight_z = weight_dz + sz * 16; float32x4_t w0 = vld1q_f32(weight_z + 4 * 0); float32x4_t w1 = vld1q_f32(weight_z + 4 * 1); float32x4_t w2 = vld1q_f32(weight_z + 4 * 2); float32x4_t w3 = vld1q_f32(weight_z + 4 * 3); float32x4_t srcValue = vld1q_f32(src_z); float32x4_t s0 = vdupq_n_f32(srcValue[0]); float32x4_t s1 = vdupq_n_f32(srcValue[1]); float32x4_t s2 = vdupq_n_f32(srcValue[2]); float32x4_t s3 = vdupq_n_f32(srcValue[3]); float32x4_t sw0 = vmulq_f32(s0, w0); float32x4_t sw1 = vmulq_f32(s1, w1); float32x4_t sw2 = vmulq_f32(s2, w2); float32x4_t sw3 = vmulq_f32(s3, w3); dstValue = vaddq_f32(dstValue, sw0); dstValue = vaddq_f32(dstValue, sw1); dstValue = vaddq_f32(dstValue, sw2); dstValue = vaddq_f32(dstValue, sw3); } vst1q_f32(dst_x, dstValue); } } #endif // __aarch64__ #else int dx, sz, fx, fy, dz; size_t src_depth_step = 4 * width; for (dz = 0; dz < dst_depth_quad; ++dz) { float *dst_z = dst + dz * dst_step; float *weight_dz = (float *)weight + dz * (src_depth_quad * 16 + weight_depth_offset); for (dx = 0; dx < width; ++dx) { float *dst_x = dst_z + dx * 4; dst_x[0] = 0.0f; dst_x[1] = 0.0f; dst_x[2] = 0.0f; dst_x[3] = 0.0f; const float *src_dx = src + 4 * dx; for (sz = 0; sz < src_depth_quad; ++sz) { const float *src_z = src_dx + sz * src_depth_step; const float *weight_z = weight_dz + sz * 16; for (int i = 0; i < 4; ++i) { for (int j = 0; j < 4; ++j) { dst_x[j] += src_z[i] * weight_z[4 * i + j]; } } } } } #endif } static void bcnn_gemm_kernel4x4_tiled(float *dst_batch, const float *src, const float *weight, size_t src_depth_quad, size_t dst_step, size_t dst_depth_quad, size_t weight_depth_offset) { bcnn_gemm_kernel4x4(dst_batch, src, weight, src_depth_quad, 
dst_step, dst_depth_quad, CONV_TILED, weight_depth_offset); } //#endif void bcnn_conv3x3s1_kernel(float *src, int src_w, int src_h, int src_c, float *dst, int dst_w, int dst_h, int dst_c, int batch_size, int pad, float *weights, float *scales, float *biases, float *slopes, float *workspace, int workspace_sz, int post_func, int num_threads) { int src_c4 = bh_div_up(src_c, 4); int dst_c4 = bh_div_up(dst_c, 4); int dst_w2 = bh_div_up(dst_w, 2); int dst_h2 = bh_div_up(dst_h, 2); int workspace_thread_stride = workspace_sz / num_threads; bcnn_post_conv_nc4hw4_func post_function = bcnn_post_conv_nc4hw4_lut[post_func]; for (int b = 0; b < batch_size; ++b) { float *src_batch = src + src_w * src_h * src_c4 * 4 * b; float *dst_batch = dst + dst_w * dst_h * dst_c4 * 4 * b; int dst_area4 = dst_h2 * dst_w2; int num_tiles = bh_div_up(dst_area4, CONV_TILED); num_threads = bh_min(num_threads, num_tiles); float *weight = weights; float *bias = biases; #pragma omp parallel for num_threads(num_threads) for (int thread_id = 0; thread_id < num_threads; thread_id++) { float *src_thread = workspace + thread_id * workspace_thread_stride; /*fprintf(stderr, "num_threads %d stride %d\n", num_threads, workspace_thread_stride);*/ for (int tid = (int)thread_id; tid < num_tiles; tid += num_threads) { int x_tile = (int)tid * CONV_TILED; int xr = dst_area4 - x_tile; int xc = xr > CONV_TILED ? 
CONV_TILED : xr; float *dst_block = src_thread + xc * CONV3x3_SRC_BLOCK * (src_c4 + dst_c4); float *dst_thread = src_thread + xc * CONV3x3_SRC_BLOCK * src_c4; // bh_timer t = {0}; // bh_timer_start(&t); for (int xi = 0; xi < xc; ++xi) { int index = x_tile + xi; float *dst_xi = src_thread + 4 * xi; int w_idx = index % dst_w2; int h_idx = index / dst_w2; int src_x = w_idx * 2 - pad; int src_y = h_idx * 2 - pad; int sy = bh_max(0, src_y) - src_y; int ey = bh_min(src_y + 4, src_h) - src_y; int sx = bh_max(0, src_x) - src_x; int ex = bh_min(src_x + 4, src_w) - src_x; float *src_start = src_batch + (src_x + src_y * src_w) * 4; for (int z = 0; z < src_c4; ++z) { memset(dst_block, 0, CONV3x3_SRC_BLOCK * sizeof(float)); float *dst_start = dst_xi + z * 4 * xc; float *src_z = src_start + z * 4 * src_w * src_h; if (ex > sx) { // Extract One Block for (int yy = sy; yy < ey; ++yy) { float *dst_yy = dst_block + yy * 16; float *src_yy = src_z + 4 * src_w * yy; memcpy(dst_yy + 4 * sx, src_yy + sx * 4, 4 * (ex - sx) * sizeof(float)); } } // Transform bcnn_conv3x3_convert_src(dst_block, dst_start, 4 * xc * src_c4); } } // bh_timer_stop(&t); // fprintf(stderr, "conv3x3 src %f\n", bh_timer_get_msec(&t)); // bh_timer_start(&t); if (xc == CONV_TILED) { for (int i = 0; i < CONV3x3_BLOCK_UNIT * CONV3x3_BLOCK_UNIT; ++i) { bcnn_gemm_kernel4x4_tiled( dst_thread + i * dst_c4 * 4 * xc, src_thread + i * src_c4 * 4 * xc, weight + i * 16 * src_c4 * dst_c4, src_c4, xc * 4, dst_c4, 0); } } else { for (int i = 0; i < CONV3x3_BLOCK_UNIT * CONV3x3_BLOCK_UNIT; ++i) { bcnn_gemm_kernel4x4(dst_thread + (i * dst_c4) * xc * 4, src_thread + i * src_c4 * 4 * xc, weight + (i * dst_c4) * src_c4 * 16, src_c4, xc * 4, dst_c4, xc, 0); } } // bh_timer_stop(&t); // fprintf(stderr, "conv3x3 gemm %f\n", bh_timer_get_msec(&t)); // dst for (int xi = 0; xi < xc; ++xi) { int index = x_tile + xi; float *src_xi = dst_thread + 4 * xi; int w_idx = index % dst_w2; int h_idx = index / dst_w2; int dst_x = w_idx * 2; int dst_y = 
h_idx * 2; float *dst_batch_xi = dst_batch + 4 * (dst_x + dst_y * dst_w); for (int z = 0; z < dst_c4; ++z) { float *src_z = src_xi + z * xc * 4; float *dst_z = dst_batch_xi + z * dst_w * dst_h * 4; bcnn_conv3x3_convert_dst(src_z, dst_block, dst_c4 * 4 * xc); // bias addition and relu float *bias_z = bias + 4 * z; float *scales_z = scales + 4 * z; float *slopes_z = slopes + 4 * z; post_function(dst_block, dst_block, bias_z, scales_z, slopes_z, 4, 1); bv_float4_store(bv_float4_load(dst_block), dst_z); if (w_idx * 2 + 1 < dst_w) { bv_float4_store(bv_float4_load(dst_block + 4), dst_z + 4); } if (h_idx * 2 + 1 < dst_h) { bv_float4_store(bv_float4_load(dst_block + 8), dst_z + dst_w * 4); if (w_idx * 2 + 1 < dst_w) { bv_float4_store(bv_float4_load(dst_block + 12), dst_z + dst_w * 4 + 4); } } } } // bh_timer_stop(&t); // fprintf(stderr, "conv3x3 dst %f\n", bh_timer_get_msec(&t)); } } } return; } // General Matrix-Matrix multiplication // ldb n // _________ // | | // | B | k // | | // ________|______ | // | | | // m| | | m // | A | C | // |_______|_______| // lda k ldc n // // This implementation follows the Blis micro-kernel algorithm // Reference: BLIS: A Framework for Rapidly Instantiating BLAS Functionality static int equal(float a, float b) { const float EPSILON = 1e-5; if (fabsf(a - b) < EPSILON) { return 1; } return 0; } static void sgemm_nn_pack_MRxk8(int k, const float *A, int inc_row_A, int inc_col_A, float *buffer, int mr) { int j, a2 = inc_row_A, a3 = 2 * inc_row_A, a4 = 3 * inc_row_A; int a5 = 4 * inc_row_A; int a6 = 5 * inc_row_A; int a7 = 6 * inc_row_A; int a8 = 7 * inc_row_A; for (j = 0; j < k; ++j) { buffer[0] = A[0]; buffer[1] = A[a2]; buffer[2] = A[a3]; buffer[3] = A[a4]; buffer[4] = A[a5]; buffer[5] = A[a6]; buffer[6] = A[a7]; buffer[7] = A[a8]; A += 1; buffer += mr; } } static void sgemm_nn_pack_MRxk4(int k, const float *A, int inc_row_A, int inc_col_A, float *buffer, int mr) { int j, a2 = inc_row_A, a3 = 2 * inc_row_A, a4 = 3 * inc_row_A; for (j = 0; j 
< k; ++j) { buffer[0] = A[0]; buffer[1] = A[a2]; buffer[2] = A[a3]; buffer[3] = A[a4]; A += 1; buffer += mr; } } static void sgemm_nn_pack_A(int mc, int kc, const float *A, int inc_row_A, int inc_col_A, float *buffer, int mr, int num_threads) { int mp = mc / mr; int _mr = mc % mr; int tmp1 = kc * mr; int tmp2 = mr * inc_row_A; #pragma omp parallel for num_threads(num_threads) for (int i = 0; i < mp; ++i) { #ifdef BCNN_USE_NEON #if (defined(__aarch64__)) sgemm_nn_pack_MRxk8(kc, A + tmp2 * i, inc_row_A, inc_col_A, buffer + tmp1 * i, mr); #else sgemm_nn_pack_MRxk4(kc, A + tmp2 * i, inc_row_A, inc_col_A, buffer + tmp1 * i, mr); #endif // __aarch64__ #else sgemm_nn_pack_MRxk8(kc, A + tmp2 * i, inc_row_A, inc_col_A, buffer + tmp1 * i, mr); #endif } A += (tmp2 * mp); buffer += (tmp1 * mp); if (_mr > 0) { for (int j = 0; j < kc; ++j) { for (int i = 0; i < _mr; ++i) { buffer[i] = A[i * inc_row_A]; } for (int i = _mr; i < mr; ++i) { buffer[i] = 0.0; } A += 1; buffer += mr; } } } static void sgemm_pack_A(int mc, int kc, const float *A, int inc_row_A, int inc_col_A, float *p, int mr) { int j, l, i0, i, nu; int mp = (mc + mr - 1) / mr; for (j = 0; j < kc; ++j) { for (l = 0; l < mp; ++l) { for (i0 = 0; i0 < mr; ++i0) { i = l * mr + i0; nu = l * mr * kc + j * mr + i0; p[nu] = (i < mc) ? A[i * inc_row_A + j * inc_col_A] : 0; } } } } static void sgemm_pack_B(int kc, int nc, const float *B, int inc_row_B, int inc_col_B, float *p, int nr) { int i, l, j0; const int np = (nc + nr - 1) / nr; for (l = 0; l < np; ++l) { for (i = 0; i < kc; ++i) { for (j0 = 0; j0 < nr; ++j0) { int j = l * nr + j0; int nu = l * nr * kc + i * nr + j0; p[nu] = (j < nc) ? 
B[i * inc_row_B + j * inc_col_B] : 0; } } } } static void sgemm_nn_pack_kxNR(int k, const float *B, int inc_row_B, int inc_col_B, float *buffer, int nr) { int i, j; for (i = 0; i < k; ++i) { for (j = 0; j < nr; ++j) { buffer[j] = B[j]; } B += inc_row_B; buffer += nr; } } static void sgemm_nn_pack_B(int kc, int nc, const float *B, int inc_row_B, int inc_col_B, float *buffer, int nr, int num_threads) { int np = nc / nr; int _nr = nc % nr; int tmp1 = kc * nr; #pragma omp parallel for num_threads(num_threads) for (int j = 0; j < np; ++j) { sgemm_nn_pack_kxNR(kc, B + nr * j, inc_row_B, inc_col_B, buffer + tmp1 * j, nr); } B += (nr * np); buffer += (tmp1 * np); if (_nr > 0) { for (int i = 0; i < kc; ++i) { for (int j = 0; j < _nr; ++j) { buffer[j] = B[j]; } for (int j = _nr; j < nr; ++j) { buffer[j] = 0.0; } buffer += nr; B += inc_row_B; } } } static void sgemm_ukernel(int kc, float alpha, const float *A, const float *B, float beta, float *C, int inc_row_C, int inc_col_C, int mr, int nr, float *AB0) { float AB[MR * NR] __attribute__((aligned(32))); #if (defined(BCNN_USE_AVX)) __m256 abv0 = _mm256_setzero_ps(); __m256 abv1 = _mm256_setzero_ps(); __m256 abv2 = _mm256_setzero_ps(); __m256 abv3 = _mm256_setzero_ps(); __m256 abv4 = _mm256_setzero_ps(); __m256 abv5 = _mm256_setzero_ps(); __m256 abv6 = _mm256_setzero_ps(); __m256 abv7 = _mm256_setzero_ps(); __m256 av; for (int l = 0; l < kc; ++l) { av = _mm256_load_ps(A); abv0 = _mm256_add_ps(abv0, _mm256_mul_ps(_mm256_broadcast_ss(B), av)); abv1 = _mm256_add_ps(abv1, _mm256_mul_ps(_mm256_broadcast_ss(B + 1), av)); abv2 = _mm256_add_ps(abv2, _mm256_mul_ps(_mm256_broadcast_ss(B + 2), av)); abv3 = _mm256_add_ps(abv3, _mm256_mul_ps(_mm256_broadcast_ss(B + 3), av)); abv4 = _mm256_add_ps(abv4, _mm256_mul_ps(_mm256_broadcast_ss(B + 4), av)); abv5 = _mm256_add_ps(abv5, _mm256_mul_ps(_mm256_broadcast_ss(B + 5), av)); abv6 = _mm256_add_ps(abv6, _mm256_mul_ps(_mm256_broadcast_ss(B + 6), av)); abv7 = _mm256_add_ps(abv7, 
_mm256_mul_ps(_mm256_broadcast_ss(B + 7), av)); A += mr; B += nr; } _mm256_store_ps(AB + 0, abv0); _mm256_store_ps(AB + 8, abv1); _mm256_store_ps(AB + 16, abv2); _mm256_store_ps(AB + 24, abv3); _mm256_store_ps(AB + 32, abv4); _mm256_store_ps(AB + 40, abv5); _mm256_store_ps(AB + 48, abv6); _mm256_store_ps(AB + 56, abv7); #elif (defined(BCNN_USE_NEON)) #if (defined(__aarch64__)) float32x4_t av0, av1, bv0, bv1; float32x4_t abv0, abv1, abv2, abv3, abv4, abv5, abv6, abv7, abv8, abv9, abv10, abv11, abv12, abv13, abv14, abv15; abv0 = vdupq_n_f32(0.0f); abv1 = vdupq_n_f32(0.0f); abv2 = vdupq_n_f32(0.0f); abv3 = vdupq_n_f32(0.0f); abv4 = vdupq_n_f32(0.0f); abv5 = vdupq_n_f32(0.0f); abv6 = vdupq_n_f32(0.0f); abv7 = vdupq_n_f32(0.0f); abv8 = vdupq_n_f32(0.0f); abv9 = vdupq_n_f32(0.0f); abv10 = vdupq_n_f32(0.0f); abv11 = vdupq_n_f32(0.0f); abv12 = vdupq_n_f32(0.0f); abv13 = vdupq_n_f32(0.0f); abv14 = vdupq_n_f32(0.0f); abv15 = vdupq_n_f32(0.0f); for (int p = 0; p < kc; ++p) { av0 = vld1q_f32(A); av1 = vld1q_f32(A + 4); bv0 = vld1q_f32(B); bv1 = vld1q_f32(B + 4); abv0 = vfmaq_laneq_f32(abv0, av0, bv0, 0); abv1 = vfmaq_laneq_f32(abv1, av1, bv0, 0); abv2 = vfmaq_laneq_f32(abv2, av0, bv0, 1); abv3 = vfmaq_laneq_f32(abv3, av1, bv0, 1); abv4 = vfmaq_laneq_f32(abv4, av0, bv0, 2); abv5 = vfmaq_laneq_f32(abv5, av1, bv0, 2); abv6 = vfmaq_laneq_f32(abv6, av0, bv0, 3); abv7 = vfmaq_laneq_f32(abv7, av1, bv0, 3); abv8 = vfmaq_laneq_f32(abv8, av0, bv1, 0); abv9 = vfmaq_laneq_f32(abv9, av1, bv1, 0); abv10 = vfmaq_laneq_f32(abv10, av0, bv1, 1); abv11 = vfmaq_laneq_f32(abv11, av1, bv1, 1); abv12 = vfmaq_laneq_f32(abv12, av0, bv1, 2); abv13 = vfmaq_laneq_f32(abv13, av1, bv1, 2); abv14 = vfmaq_laneq_f32(abv14, av0, bv1, 3); abv15 = vfmaq_laneq_f32(abv15, av1, bv1, 3); B += nr; A += mr; } vst1q_f32(AB, abv0); vst1q_f32(AB + 4, abv1); vst1q_f32(AB + 8, abv2); vst1q_f32(AB + 12, abv3); vst1q_f32(AB + 16, abv4); vst1q_f32(AB + 20, abv5); vst1q_f32(AB + 24, abv6); vst1q_f32(AB + 28, abv7); 
vst1q_f32(AB + 32, abv8); vst1q_f32(AB + 36, abv9); vst1q_f32(AB + 40, abv10); vst1q_f32(AB + 44, abv11); vst1q_f32(AB + 48, abv12); vst1q_f32(AB + 52, abv13); vst1q_f32(AB + 56, abv14); vst1q_f32(AB + 60, abv15); #else float32x4_t abv0 = vdupq_n_f32(0.0f); float32x4_t abv1 = vdupq_n_f32(0.0f); float32x4_t abv2 = vdupq_n_f32(0.0f); float32x4_t abv3 = vdupq_n_f32(0.0f); float32x4_t av; float32x4_t bv; float32x2_t bv01; float32x2_t bv23; for (int p = 0; p < kc; ++p) { av = vld1q_f32(A); bv = vld1q_f32(B); bv01 = vget_low_f32(bv); abv0 = vmlaq_lane_f32(abv0, av, bv01, 0); abv1 = vmlaq_lane_f32(abv1, av, bv01, 1); bv23 = vget_high_f32(bv); abv2 = vmlaq_lane_f32(abv2, av, bv23, 0); abv3 = vmlaq_lane_f32(abv3, av, bv23, 1); A += nr; B += nr; } vst1q_f32(AB + 0, abv0); vst1q_f32(AB + 4, abv1); vst1q_f32(AB + 8, abv2); vst1q_f32(AB + 12, abv3); #endif // __aarch64__ #else for (int i = 0; i < nr * nr; ++i) { AB[i] = 0.0f; } for (int l = 0; l < kc; ++l) { for (int j = 0; j < nr; ++j) { for (int i = 0; i < mr; ++i) { AB[i + j * mr] += A[i] * B[j]; } } A += mr; B += nr; } #endif if (equal(beta, 0.0)) { for (int j = 0; j < nr; ++j) { for (int i = 0; i < mr; ++i) { C[i * inc_row_C + j * inc_col_C] = 0.0; } } } else if (!equal(beta, 1.0)) { for (int j = 0; j < nr; ++j) { for (int i = 0; i < mr; ++i) { C[i * inc_row_C + j * inc_col_C] *= beta; } } } if (!equal(alpha, 1.0)) { for (int j = 0; j < nr; ++j) { for (int i = 0; i < mr; ++i) { C[i * inc_row_C + j * inc_col_C] += alpha * AB[i + j * mr]; } } } else { for (int j = 0; j < nr; ++j) { for (int i = 0; i < mr; ++i) { C[i * inc_row_C + j * inc_col_C] += AB[i + j * mr]; } } } } static void sgemm_axpy(int m, int n, float alpha, const float *X, int incRowX, int incColX, float *Y, int incRowY, int incColY) { int i, j; if (!equal(alpha, 1.0)) { for (j = 0; j < n; ++j) { for (i = 0; i < m; ++i) { Y[i * incRowY + j] += alpha * X[i + j * incColX]; } } } else { for (j = 0; j < n; ++j) { for (i = 0; i < m; ++i) { Y[i * incRowY + j] += X[i + 
/* Tail of an axpy/accumulate helper whose beginning lies before this chunk
 * (kept byte-identical; do not edit without seeing the full definition). */
                                                         j * incColX];
            }
        }
    }
}

/* Scale the m x n matrix X in place by alpha.  alpha == 0 takes an explicit
 * zero-fill path so pre-existing NaN/Inf in X cannot survive (0 * NaN != 0).
 * NOTE(review): indexing is X[i * incRowX + j] — incColX is accepted but
 * never used, so this is only correct for inc_col == 1 (row-major C with
 * unit column stride).  All call sites in this file satisfy that, but the
 * parameter is misleading — confirm before reusing elsewhere. */
static void sgemm_scal(int m, int n, float alpha, float *X, int incRowX,
                       int incColX) {
    int i, j;

    if (!equal(alpha, 0.0)) {
        for (i = 0; i < m; ++i) {
            for (j = 0; j < n; ++j) {
                X[i * incRowX + j] *= alpha;
            }
        }
    } else {
        for (i = 0; i < m; ++i) {
            for (j = 0; j < n; ++j) {
                X[i * incRowX + j] = 0.0;
            }
        }
    }
}

/* Macro-kernel: multiply one packed mc x kc panel of A (buffer_A) by one
 * packed kc x nc panel of B (buffer_B) and accumulate into the C block,
 * tiling into mr x nr micro-tiles handled by sgemm_ukernel.
 * Full tiles go straight to the micro-kernel; edge tiles (mri < mr or
 * nrj < nr) are computed into a local mr x nr scratch tile (beta = 0) and
 * then merged into C via sgemm_scal + sgemm_axpy so out-of-range elements
 * of C are never touched.
 * NOTE(review): the inner "#pragma omp parallel for" is nested inside the
 * outer one; unless nested parallelism is enabled at runtime it is inert
 * (executes on one thread) — presumably intentional, verify with profiling. */
static void sgemm_mkernel(int mc, int nc, int kc, float alpha, float beta,
                          float *C, int inc_row_C, int inc_col_C,
                          float *buffer_A, float *buffer_B, float *buffer_AB,
                          float *buffer_C, int mr, int nr, int num_threads) {
    int mp = (mc + mr - 1) / mr;   /* number of row micro-tiles (ceil) */
    int np = (nc + nr - 1) / nr;   /* number of column micro-tiles (ceil) */
    int _mr = mc % mr;             /* height of the last (partial) row tile */
    int _nr = nc % nr;             /* width of the last (partial) col tile */
#pragma omp parallel for num_threads(num_threads)
    for (int j = 0; j < np; ++j) {
        int nrj = (j != np - 1 || _nr == 0) ? nr : _nr;
#pragma omp parallel for num_threads(num_threads)
        for (int i = 0; i < mp; ++i) {
            int mri = (i != mp - 1 || _mr == 0) ? mr : _mr;
            if (mri == mr && nrj == nr) {
                sgemm_ukernel(kc, alpha, &buffer_A[i * kc * mr],
                              &buffer_B[j * kc * nr], beta,
                              &C[i * mr * inc_row_C + j * nr], inc_row_C,
                              inc_col_C, mr, nr, buffer_AB);
            } else {
                /* Edge tile: compute into scratch, then merge into C. */
                float buf_c[MR * NR];
                sgemm_ukernel(kc, alpha, &buffer_A[i * kc * mr],
                              &buffer_B[j * kc * nr], 0.0, buf_c, 1, mr, mr,
                              nr, buffer_AB);
                sgemm_scal(mri, nrj, beta,
                           &C[i * mr * inc_row_C + j * nr], inc_row_C,
                           inc_col_C);
                sgemm_axpy(mri, nrj, 1.0, buf_c, 1, mr,
                           &C[i * mr * inc_row_C + j * nr], inc_row_C,
                           inc_col_C);
            }
        }
    }
}

/* C = alpha * A * B + beta * C for the no-transpose / no-transpose case,
 * blocked MC x NC x KC with threaded packing (sgemm_nn_pack_A/B).
 * beta is applied only on the first k-block (l == 0); subsequent k-blocks
 * accumulate with _beta = 1.  alpha == 0 or k == 0 degenerates to a pure
 * scaling of C. */
static void sgemm_nn(bcnn_gemm_context *ctx, int m, int n, int k, float alpha,
                     const float *A, int inc_row_A, int inc_col_A,
                     const float *B, int inc_row_B, int inc_col_B, float beta,
                     float *C, int inc_row_C, int inc_col_C, int num_threads) {
    int mb = (m + MC - 1) / MC;
    int nb = (n + NC - 1) / NC;
    int kb = (k + KC - 1) / KC;
    int _mc = m % MC;
    int _nc = n % NC;
    int _kc = k % KC;

    if (equal(alpha, 0.0) || k == 0) {
        sgemm_scal(m, n, beta, C, inc_row_C, inc_col_C);
        return;
    }
    for (int j = 0; j < nb; ++j) {
        int nc = (j != nb - 1 || _nc == 0) ? NC : _nc;
        for (int l = 0; l < kb; ++l) {
            int kc = (l != kb - 1 || _kc == 0) ? KC : _kc;
            float _beta = (l == 0) ? beta : 1.0f;
            sgemm_nn_pack_B(kc, nc, &B[l * KC * inc_row_B + j * NC],
                            inc_row_B, inc_col_B, ctx->buffer_b, NR,
                            num_threads);
            for (int i = 0; i < mb; ++i) {
                int mc = (i != mb - 1 || _mc == 0) ? MC : _mc;
                sgemm_nn_pack_A(mc, kc, &A[i * MC * inc_row_A + l * KC],
                                inc_row_A, inc_col_A, ctx->buffer_a, MR,
                                num_threads);
                sgemm_mkernel(
                    mc, nc, kc, alpha, _beta,
                    &C[i * MC * inc_row_C + j * NC], inc_row_C, inc_col_C,
                    ctx->buffer_a, ctx->buffer_b, ctx->buffer_ab,
                    ctx->buffer_c, MR, NR, num_threads);
            }
        }
    }
}

/* General-stride variant of the blocked sgemm (used for transposed inputs).
 * Identical blocking to sgemm_nn, but packs with the generic (serial)
 * sgemm_pack_A / sgemm_pack_B which honor arbitrary row/col strides. */
static void sgemm(bcnn_gemm_context *ctx, int m, int n, int k, float alpha,
                  const float *A, int inc_row_A, int inc_col_A, const float *B,
                  int inc_row_B, int inc_col_B, float beta, float *C,
                  int inc_row_C, int inc_col_C, int num_threads) {
    int mb = (m + MC - 1) / MC;
    int nb = (n + NC - 1) / NC;
    int kb = (k + KC - 1) / KC;
    int _mc = m % MC;
    int _nc = n % NC;
    int _kc = k % KC;

    if (equal(alpha, 0.0) || k == 0) {
        sgemm_scal(m, n, beta, C, inc_row_C, inc_col_C);
        return;
    }
    for (int j = 0; j < nb; ++j) {
        int nc = (j != nb - 1 || _nc == 0) ? NC : _nc;
        for (int l = 0; l < kb; ++l) {
            int kc = (l != kb - 1 || _kc == 0) ? KC : _kc;
            float _beta = (l == 0) ? beta : 1.0f;
            sgemm_pack_B(kc, nc, &B[l * KC * inc_row_B + j * NC], inc_row_B,
                         inc_col_B, ctx->buffer_b, NR);
            for (int i = 0; i < mb; ++i) {
                int mc = (i != mb - 1 || _mc == 0) ? MC : _mc;
                sgemm_pack_A(mc, kc, &A[i * MC * inc_row_A + l * KC],
                             inc_row_A, inc_col_A, ctx->buffer_a, MR);
                sgemm_mkernel(
                    mc, nc, kc, alpha, _beta,
                    &C[i * MC * inc_row_C + j * NC], inc_row_C, inc_col_C,
                    ctx->buffer_a, ctx->buffer_b, ctx->buffer_ab,
                    ctx->buffer_c, MR, NR, num_threads);
            }
        }
    }
}

/* Public entry point: C = alpha * op(A) * op(B) + beta * C, row-major.
 * trans_a / trans_b select op() by swapping the row/column strides instead
 * of physically transposing.  On aarch64 the call is forwarded to OpenBLAS
 * (column-major, hence A and B plus their dims are swapped); num_threads is
 * not forwarded on that path — presumably OpenBLAS threading is configured
 * globally, verify if thread control matters there.  Always returns 0. */
int bcnn_gemm(bcnn_gemm_context *ctx, int trans_a, int trans_b, int m, int n,
              int k, float alpha, float *A, int lda, float *B, int ldb,
              float beta, float *C, int ldc, int num_threads) {
#if (defined(__aarch64__))
    // Switch A and B as OpenBlas is column major
    openblas_sgemm(ctx, trans_b, trans_a, n, m, k, alpha, B, ldb, A, lda,
                   beta, C, ldc);
#else
    int inc_row_A = (!trans_a) ? lda : 1;
    int inc_col_A = (!trans_a) ? 1 : lda;
    int inc_row_B = (!trans_b) ? ldb : 1;
    int inc_col_B = (!trans_b) ? 1 : ldb;
    if (!trans_a && !trans_b) {
        sgemm_nn(ctx, m, n, k, alpha, A, inc_row_A, inc_col_A, B, inc_row_B,
                 inc_col_B, beta, C, ldc, 1, num_threads);
    } else {
        sgemm(ctx, m, n, k, alpha, A, inc_row_A, inc_col_A, B, inc_row_B,
              inc_col_B, beta, C, ldc, 1, num_threads);
    }
#endif
    return 0;
}
ethereum_fmt_plug.c
/*
 * JtR format to crack password protected Ethereum Wallets.
 *
 * Supports three wallet flavors (cur_salt->type):
 *   0 - keystore, KDF = PBKDF2-HMAC-SHA256
 *   1 - keystore, KDF = scrypt
 *   2 - presale wallet (PBKDF2 with the password as its own salt)
 *
 * This software is Copyright (c) 2017, Dhiru Kholia <kholia at kth.se> and it
 * is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_ethereum;
#elif FMT_REGISTERS_H
john_register_one(&fmt_ethereum);
#else

#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE               16  // tuned on i7-6600U
#endif
#endif

#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#define PBKDF2_HMAC_SHA256_ALSO_INCLUDE_CTX 1 // hack, we can't use our simd pbkdf2 code for presale wallets because of varying salt
#include "pbkdf2_hmac_sha256.h"
#include "ethereum_common.h"
#include "escrypt/crypto_scrypt.h"
#include "KeccakHash.h"
#include "aes.h"
#include "jumbo.h"
#include "memdbg.h"

#define FORMAT_NAME             "Ethereum Wallet"
#define FORMAT_LABEL            "ethereum"
#ifdef SIMD_COEF_64
#define ALGORITHM_NAME          "PBKDF2-SHA256/scrypt Keccak " SHA256_ALGORITHM_NAME
#else
#define ALGORITHM_NAME          "PBKDF2-SHA256/scrypt Keccak 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1
#define BINARY_SIZE             16
#define PLAINTEXT_LENGTH        125
#define SALT_SIZE               sizeof(*cur_salt)
#define BINARY_ALIGN            sizeof(uint32_t)
#define SALT_ALIGN              sizeof(uint64_t)
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT      SSE_GROUP_SZ_SHA256
#define MAX_KEYS_PER_CRYPT      SSE_GROUP_SZ_SHA256
#else
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#endif

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* crypt_out holds the 32-byte Keccak-256 MAC; only BINARY_SIZE bytes
 * are compared against the stored binary. */
static uint32_t (*crypt_out)[BINARY_SIZE * 2 / sizeof(uint32_t)];
static custom_salt *cur_salt;

/* Single pad byte 0x02 (plus alignment padding) appended to the decrypted
 * presale seed before the final Keccak-256 (type 2 path). */
static union {
	uint64_t dummy;
	unsigned char data[8];
} dpad;

/* One-time allocation of per-candidate buffers, scaled for OpenMP. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt);

	memcpy(dpad.data, "\x02\x00\x00\x00\x00\x00\x00\x00", 8);
}

static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_out);
}

static void set_salt(void *salt)
{
	cur_salt = (custom_salt *)salt;
}

static void ethereum_set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Derive the key per wallet type, then compute the Keccak-256 MAC that is
 * compared against the stored binary.  Without OpenMP the loop header is
 * compiled out and the block runs once for index 0, which still covers all
 * MAX_KEYS_PER_CRYPT candidates via the inner i loops. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
		unsigned char master[MAX_KEYS_PER_CRYPT][32];
		int i;

		if (cur_salt->type == 0) {
#ifdef SIMD_COEF_64
			int lens[MAX_KEYS_PER_CRYPT];
			unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				lens[i] = strlen(saved_key[index+i]);
				pin[i] = (unsigned char*)saved_key[index+i];
				pout[i] = master[i];
			}
			pbkdf2_sha256_sse((const unsigned char**)pin, lens, cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, pout, 32, 0);
#else
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
				pbkdf2_sha256((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, master[i], 32, 0);
#endif
		} else if (cur_salt->type == 1) {
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
				crypto_scrypt((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), cur_salt->salt, cur_salt->saltlen, cur_salt->N, cur_salt->r, cur_salt->p, master[i], 32);
		} else if (cur_salt->type == 2) {
			/* Presale: password is also the salt, 2000 iterations,
			 * 16-byte derived key. */
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
				pbkdf2_sha256((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), (unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), 2000, master[i], 16, 0);
		}

		if (cur_salt->type == 0 || cur_salt->type == 1) {
			/* MAC = Keccak-256(derived_key[16..31] || ciphertext) */
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				Keccak_HashInstance hash;
				Keccak_HashInitialize(&hash, 1088, 512, 256, 0x01); // delimitedSuffix is 0x06 for SHA-3, and 0x01 for Keccak
				Keccak_HashUpdate(&hash, master[i] + 16, 16 * 8);
				Keccak_HashUpdate(&hash, cur_salt->ct, cur_salt->ctlen * 8);
				Keccak_HashFinal(&hash, (unsigned char*)crypt_out[index+i]);
			}
		} else {
			/* Presale: AES-128-CBC decrypt encseed (first 16 bytes
			 * are the IV), strip PKCS padding, then MAC the seed
			 * with a trailing 0x02 byte.  Bad padding zeroes the
			 * output so the candidate cannot match. */
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				AES_KEY akey;
				Keccak_HashInstance hash;
				unsigned char iv[16];
				unsigned char seed[4096];
				int padbyte;
				int datalen;

				AES_set_decrypt_key(master[i], 128, &akey);
				memcpy(iv, cur_salt->encseed, 16);
				AES_cbc_encrypt(cur_salt->encseed + 16, seed, cur_salt->eslen - 16, &akey, iv, AES_DECRYPT);
				if (check_pkcs_pad(seed, cur_salt->eslen - 16, 16) < 0) {
					memset(crypt_out[index+i], 0, BINARY_SIZE);
					continue;
				}
				padbyte = seed[cur_salt->eslen - 16 - 1];
				datalen = cur_salt->eslen - 16 - padbyte;
				if (datalen < 0) {
					memset(crypt_out[index+i], 0, BINARY_SIZE);
					continue;
				}
				Keccak_HashInitialize(&hash, 1088, 512, 256, 0x01);
				Keccak_HashUpdate(&hash, seed, datalen * 8);
				Keccak_HashUpdate(&hash, dpad.data, 1 * 8);
				Keccak_HashFinal(&hash, (unsigned char*)crypt_out[index+i]);
			}
		}
	}

	return count;
}

static int cmp_all(void *binary, int count)
{
	int index = 0;

	for (; index < count; index++)
		if (((uint32_t*)binary)[0] == crypt_out[index][0])
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

struct fmt_main fmt_ethereum = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
		{
			"iteration count",
		},
		{ FORMAT_TAG },
		ethereum_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		ethereum_common_valid,
		fmt_default_split,
		ethereum_get_binary,
		ethereum_common_get_salt,
		{
			ethereum_common_iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		ethereum_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
test_core.c
/* * RELIC is an Efficient LIbrary for Cryptography * Copyright (C) 2007-2013 RELIC Authors * * This file is part of RELIC. RELIC is legal property of its developers, * whose names are not listed here. Please refer to the COPYRIGHT file * for contact information. * * RELIC is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * RELIC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with RELIC. If not, see <http://www.gnu.org/licenses/>. */ /** * @file * * Tests for configuration management. * * @version $Id: test_core.c 1364 2013-02-18 11:57:14Z dfaranha $ * @ingroup test */ #include <stdio.h> #include "relic.h" #include "relic_test.h" int main(void) { int code = STS_ERR; /* Initialize library with default configuration. */ if (core_init() != STS_OK) { core_clean(); return 1; } util_banner("Tests for the CORE module:\n", 0); TEST_ONCE("the library context is consistent") { TEST_ASSERT(core_get() != NULL, end); } TEST_END; TEST_ONCE("switching the library context is correct") { ctx_t new_ctx, *old_ctx; /* Backup the old context. */ old_ctx = core_get(); /* Switch the library context. */ core_set(&new_ctx); /* Reinitialize library with new context. */ core_init(); /* Run function to manipulate the library context. */ THROW(ERR_NO_MEMORY); core_set(old_ctx); TEST_ASSERT(err_get_code() == STS_OK, end); core_set(&new_ctx); TEST_ASSERT(err_get_code() == STS_ERR, end); /* Now we need to finalize the new context. */ core_clean(); /* And restore the original context. 
*/ core_set(old_ctx); } TEST_END; code = STS_OK; #ifdef MULTI TEST_ONCE("library context is thread-safe") { omp_set_num_threads(CORES); #pragma omp parallel shared(code) { if (omp_get_thread_num() == 0) { THROW(ERR_NO_MEMORY); if (err_get_code() != STS_ERR) { code = STS_ERR; } } else { core_init(); if (err_get_code() != STS_OK) { code = STS_ERR; } core_clean(); } } TEST_ASSERT(code == STS_OK, end); } TEST_END; #endif util_banner("All tests have passed.\n", 0); end: core_clean(); return code; }
deconv_2d.h
// Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef MACE_OPS_DECONV_2D_H_
#define MACE_OPS_DECONV_2D_H_

#include <algorithm>
#include <string>
#include <vector>

#include "mace/core/operator.h"
#include "mace/core/types.h"
#include "mace/ops/activation.h"
#include "mace/ops/conv_pool_2d_util.h"

namespace mace {
namespace ops {

// Framework the model was exported from; padding semantics differ between
// the two (TF derives output padding from the requested output shape,
// Caffe gets explicit output paddings).
enum FrameworkType {
  TENSORFLOW = 0,
  CAFFE = 1,
};

// Base class for Deconv2d (transposed convolution) ops: parses the common
// op arguments (strides, padding, group, framework, activation) and
// provides static output-shape/padding calculators for both frameworks.
class Deconv2dOpBase : public Operation {
 public:
  explicit Deconv2dOpBase(OpConstructContext *context)
      : Operation(context),
        strides_(Operation::GetRepeatedArgs<int>("strides")),
        padding_type_(static_cast<Padding>(Operation::GetOptionalArg<int>(
            "padding", static_cast<int>(SAME)))),
        paddings_(Operation::GetRepeatedArgs<int>("padding_values")),
        group_(Operation::GetOptionalArg<int>("group", 1)),
        model_type_(static_cast<ops::FrameworkType>(
            Operation::GetOptionalArg<int>("framework_type", 0))),
        activation_(ops::StringToActivationType(
            Operation::GetOptionalArg<std::string>("activation", "NOOP"))),
        relux_max_limit_(
            Operation::GetOptionalArg<float>("max_limit", 0.0f)) {}

  // Caffe-style shape computation: the padded output is
  // (in - 1) * stride + kernel; the given out_paddings are then cropped
  // off.  Optionally also reports in_paddings (for the equivalent direct
  // convolution) and the padded output shape.  All out-params may be null.
  // Layout of shapes is NHWC unless isNCHW is set; filter is OIHW.
  static void CalcDeconvShape_Caffe(const index_t *input_shape,   // NHWC
                                    const index_t *filter_shape,  // OIHW
                                    const int *strides,
                                    const int *out_paddings,
                                    const int group,
                                    int *in_paddings,
                                    index_t *out_shape,
                                    index_t *padded_out_shape,
                                    const bool isNCHW = false) {
    MACE_CHECK_NOTNULL(out_paddings);
    MACE_CHECK_NOTNULL(input_shape);
    MACE_CHECK_NOTNULL(filter_shape);
    MACE_CHECK_NOTNULL(strides);

    const index_t in_height = isNCHW ? input_shape[2] : input_shape[1];
    const index_t in_width = isNCHW ? input_shape[3] : input_shape[2];

    // Filter is OIHW with grouping: actual output channels = O * group.
    const index_t output_channel = filter_shape[0] * group;

    const index_t kernel_h = filter_shape[2];
    const index_t kernel_w = filter_shape[3];

    index_t padded_out_height = (in_height - 1) * strides[0] + kernel_h;
    index_t padded_out_width = (in_width - 1) * strides[1] + kernel_w;

    if (in_paddings != nullptr) {
      in_paddings[0] = static_cast<int>((kernel_h - 1) * 2 - out_paddings[0]);
      in_paddings[1] = static_cast<int>((kernel_w - 1) * 2 - out_paddings[1]);
      in_paddings[0] = std::max<int>(0, in_paddings[0]);
      in_paddings[1] = std::max<int>(0, in_paddings[1]);
    }

    if (padded_out_shape != nullptr) {
      padded_out_shape[0] = input_shape[0];
      padded_out_shape[1] = isNCHW ? output_channel : padded_out_height;
      padded_out_shape[2] = isNCHW ? padded_out_height : padded_out_width;
      padded_out_shape[3] = isNCHW ? padded_out_width : output_channel;
    }

    if (out_shape != nullptr) {
      index_t out_height = padded_out_height - out_paddings[0];
      index_t out_width = padded_out_width - out_paddings[1];
      out_shape[0] = input_shape[0];
      out_shape[1] = isNCHW ? output_channel : out_height;
      out_shape[2] = isNCHW ? out_height : out_width;
      out_shape[3] = isNCHW ? out_width : output_channel;
    }
  }

  // TensorFlow-style shape computation: the output shape is given; this
  // validates that the input shape is consistent with it under the chosen
  // padding_type, and derives in_paddings / out_paddings / padded output
  // shape (any out-param may be null).  Aborts via MACE_CHECK on mismatch.
  static void CalcDeconvShape_TF(const index_t *input_shape,   // NHWC
                                 const index_t *filter_shape,  // OIHW
                                 const index_t *output_shape,
                                 const int *strides,
                                 const int group,
                                 Padding padding_type,
                                 int *in_paddings,
                                 int *out_paddings,
                                 index_t *padded_out_shape,
                                 const bool isNCHW = false) {
    MACE_CHECK_NOTNULL(output_shape);
    MACE_CHECK_NOTNULL(input_shape);
    MACE_CHECK_NOTNULL(filter_shape);
    MACE_CHECK_NOTNULL(strides);

    const index_t in_height = isNCHW ? input_shape[2] : input_shape[1];
    const index_t in_width = isNCHW ? input_shape[3] : input_shape[2];

    const index_t out_height = isNCHW ? output_shape[2] : output_shape[1];
    const index_t out_width = isNCHW ? output_shape[3] : output_shape[2];

    // Input dilated by the stride (fractionally-strided convolution view).
    const index_t extended_in_height = (in_height - 1) * strides[0] + 1;
    const index_t extended_in_width = (in_width - 1) * strides[1] + 1;

    const index_t kernel_h = filter_shape[2];
    const index_t kernel_w = filter_shape[3];

    index_t expected_input_height = 0, expected_input_width = 0;

    switch (padding_type) {
      case VALID:
        expected_input_height =
            (out_height - kernel_h + strides[0]) / strides[0];
        expected_input_width =
            (out_width - kernel_w + strides[1]) / strides[1];
        break;
      case SAME:
        expected_input_height =
            (out_height + strides[0] - 1) / strides[0];
        expected_input_width =
            (out_width + strides[1] - 1) / strides[1];
        break;
      default:
        MACE_CHECK(false, "Unsupported padding type: ", padding_type);
    }

    MACE_CHECK(expected_input_height == in_height,
               expected_input_height, "!=", in_height);
    MACE_CHECK(expected_input_width == in_width,
               expected_input_width, "!=", in_width);

    const index_t padded_out_height =
        (in_height - 1) * strides[0] + kernel_h;
    const index_t padded_out_width =
        (in_width - 1) * strides[1] + kernel_w;

    if (in_paddings != nullptr) {
      const int p_h =
          static_cast<int>(out_height + kernel_h - 1 - extended_in_height);
      const int p_w =
          static_cast<int>(out_width + kernel_w - 1 - extended_in_width);
      in_paddings[0] = std::max<int>(0, p_h);
      in_paddings[1] = std::max<int>(0, p_w);
    }

    if (out_paddings != nullptr) {
      const int o_p_h = static_cast<int>(padded_out_height - out_height);
      const int o_p_w = static_cast<int>(padded_out_width - out_width);
      out_paddings[0] = std::max<int>(0, o_p_h);
      out_paddings[1] = std::max<int>(0, o_p_w);
    }

    if (padded_out_shape != nullptr) {
      index_t output_channel = filter_shape[0] * group;
      padded_out_shape[0] = output_shape[0];
      padded_out_shape[1] = isNCHW ? output_channel : padded_out_height;
      padded_out_shape[2] = isNCHW ? padded_out_height : padded_out_width;
      padded_out_shape[3] = isNCHW ? padded_out_width : output_channel;
    }
  }

 protected:
  std::vector<int> strides_;  // [stride_h, stride_w]
  const Padding padding_type_;
  std::vector<int> paddings_;
  const int group_;
  const FrameworkType model_type_;
  const ActivationType activation_;
  const float relux_max_limit_;
};

// Crop the interior of a padded NCHW tensor into `output`: drops pad_h rows
// and pad_w columns from each image and copies row-wise.
// NOTE(review): only the top/left padding is skipped (k + pad_h, + pad_w);
// the bottom/right excess is implicitly dropped by the out_shape bounds.
template <typename T>
void CropPadOut(const T *input,
                const index_t *in_shape,
                const index_t *out_shape,
                const index_t pad_h,
                const index_t pad_w,
                T *output) {
  const index_t batch = in_shape[0];
  const index_t channel = in_shape[1];
  const index_t in_height = in_shape[2];
  const index_t in_width = in_shape[3];
  const index_t out_height = out_shape[2];
  const index_t out_width = out_shape[3];
#pragma omp parallel for collapse(3)
  for (int i = 0; i < batch; ++i) {
    for (int j = 0; j < channel; ++j) {
      for (int k = 0; k < out_height; ++k) {
        const T *input_base =
            input + ((i * channel + j) * in_height + (k + pad_h)) * in_width;
        T *output_base =
            output + ((i * channel + j) * out_height + k)* out_width;
        memcpy(output_base, input_base + pad_w, out_width * sizeof(T));
      }
    }
  }
}

}  // namespace ops
}  // namespace mace

#endif  // MACE_OPS_DECONV_2D_H_
rsvp_fmt_plug.c
/*
 * Cracker for HMAC-MD5 and HMAC-SHA1 based authentication in RSVP.
 *
 * This software is Copyright (c) 2014 Dhiru Kholia <dhiru at openwall.com>,
 * and it is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_rsvp;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rsvp);
#else

#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE               4096
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE               8192
#endif
#endif // __MIC__
#endif

#include "arch.h"
#include "md5.h"
#include "sha.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"

#define FORMAT_LABEL            "rsvp"
#define FORMAT_NAME             "HMAC-MD5 / HMAC-SHA1, RSVP, IS-IS"
#define FORMAT_TAG              "$rsvp$"
#define TAG_LENGTH              (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME          "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        0
#define PLAINTEXT_LENGTH        125
#define BINARY_SIZE             16
#define BINARY_ALIGN            sizeof(ARCH_WORD_32)
#define SALT_SIZE               sizeof(struct custom_salt)
#define SALT_ALIGN              sizeof(int)
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#define HEXCHARS                "0123456789abcdef"
#define MAX_SALT_SIZE           8192

// currently only 2 types, 1 for md5 and 2 for SHA1. Bump this
// number each time a type is added, and make sure the types
// are sequential.
#define MAX_TYPES 2

static struct fmt_tests tests[] = {
	{"$rsvp$1$10010000ff0000ac002404010100000000000001d7e95bfa0000003a00000000000000000000000000000000000c0101c0a8011406000017000c0301c0a8010a020004020008050100007530000c0b01c0a8010a0000000000240c0200000007010000067f00000545fa000046fa000045fa0000000000007fffffff00300d020000000a010000080400000100000001060000014998968008000001000000000a000001000005dc05000000$636d8e6db5351fbc9dad620c5ec16c0b", "password12345"},
	{"$rsvp$2$10010000ff0000b0002804010100000000000001d7e95bfa0000055d0000000000000000000000000000000000000000000c0101c0a8011406000017000c0301c0a8010a020004020008050100007530000c0b01c0a8010a0000000000240c0200000007010000067f00000545fa000046fa000045fa0000000000007fffffff00300d020000000a010000080400000100000001060000014998968008000001000000000a000001000005dc05000000$ab63f157e601742983b853f13a63bc4d4379a434", "JtR_kicks_ass"},
	{NULL}
};

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *saved_len;
// when we add more types, they need to be sequential (next will be 3),
// AND we need to bump this to the count. Each type will use one of these
// to track whether it has build the first half of the hmac. The size
// of this array should be 1 more than the max number of types.
static int new_keys[MAX_TYPES+1];
// we make our crypt_out large enough for an SHA1 output now. Even though
// we only compare first BINARY_SIZE data.
static ARCH_WORD_32 (*crypt_out)[ (BINARY_SIZE+4) / sizeof(ARCH_WORD_32)];
// Cached per-candidate HMAC inner/outer pad contexts, rebuilt only when
// keys change (tracked via new_keys[] per hash type).
static SHA_CTX *ipad_ctx;
static SHA_CTX *opad_ctx;
static MD5_CTX *ipad_mctx;
static MD5_CTX *opad_mctx;

static struct custom_salt {
	int type;           // 1 = HMAC-MD5, 2 = HMAC-SHA1
	int salt_length;    // raw message length in bytes
	unsigned char salt[MAX_SALT_SIZE];
} *cur_salt;

static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	/* NOTE(review): omp_get_num_threads() returns 1 outside a parallel
	 * region, so this scaling is a no-op; the ethereum format (and most
	 * JtR formats) use omp_get_max_threads() here instead — looks like a
	 * copy/paste bug, confirm against upstream before changing. */
	int omp_t = omp_get_num_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_len));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
	/* NOTE(review): ipad_ctx/ipad_mctx use sizeof(*opad_ctx) /
	 * sizeof(*opad_mctx) — harmless since the types match, but the
	 * mismatched names look unintentional. */
	ipad_ctx = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*opad_ctx));
	opad_ctx = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*opad_ctx));
	ipad_mctx = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*opad_mctx));
	opad_mctx = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*opad_mctx));
}

static void done(void)
{
	MEM_FREE(opad_mctx);
	MEM_FREE(ipad_mctx);
	MEM_FREE(opad_ctx);
	MEM_FREE(ipad_ctx);
	MEM_FREE(crypt_out);
	MEM_FREE(saved_len);
	MEM_FREE(saved_key);
}

/* Validate "$rsvp$<type>$<hex salt>$<hex hash>"; type must be 1 or 2. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *strkeep;
	int version;

	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		return 0;

	strkeep = strdup(ciphertext);
	p = &strkeep[TAG_LENGTH];

	if ((p = strtokm(p, "$")) == NULL) /* version */
		goto err;
	version = atoi(p);
	if (version != 1 && version != 2)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* salt */
		goto err;
	if (strlen(p) >= MAX_SALT_SIZE*2)
		goto err;
	if (!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* hash */
		goto err;
	/* there is code that trim longer binary values, so we do not need to check for extra long */
	if (strlen(p) < BINARY_SIZE*2)
		goto err;
	if (!ishexlc(p))
		goto err;
	MEM_FREE(strkeep);
	return 1;
err:;
	MEM_FREE(strkeep);
	return 0;
}

/* Parse type + hex-decoded raw message into a static custom_salt. */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	int i;
	char *p, *q;

	memset(&cs, 0, SALT_SIZE);
	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;
	p = ciphertext;
	cs.type = atoi(p);
	p = p + 2; /* skip "<type>$" — type is a single digit here */
	q = strchr(p, '$') + 1;
	cs.salt_length = (q - p) / 2;
	for (i = 0; i < cs.salt_length; i++)
		cs.salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) |
			atoi16[ARCH_INDEX(p[2 * i + 1])];

	return (void*)&cs;
}

/* Hex-decode the trailing hash field (first BINARY_SIZE bytes only). */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	p = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] =
			(atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* HMAC over the raw message.  The key-dependent inner/outer pad contexts
 * are built once per key change (new_keys[type]) and cached, so repeated
 * salts only pay for the two message-dependent hash passes. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		unsigned char buf[20];
		if (cur_salt->type == 1) {
			MD5_CTX ctx;

			if (new_keys[cur_salt->type]) {
				int i, len = strlen(saved_key[index]);
				unsigned char *p = (unsigned char*)saved_key[index];
				unsigned char pad[64];

				/* RFC 2104: keys longer than the block size are
				 * first hashed down. */
				if (len > 64) {
					MD5_Init(&ctx);
					MD5_Update(&ctx, p, len);
					MD5_Final(buf, &ctx);
					len = 16;
					p = buf;
				}
				for (i = 0; i < len; ++i) {
					pad[i] = p[i] ^ 0x36;
				}
				MD5_Init(&ipad_mctx[index]);
				MD5_Update(&ipad_mctx[index], pad, len);
				if (len < 64)
					MD5_Update(&ipad_mctx[index], "\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36", 64-len);
				for (i = 0; i < len; ++i) {
					pad[i] = p[i] ^ 0x5C;
				}
				MD5_Init(&opad_mctx[index]);
				MD5_Update(&opad_mctx[index], pad, len);
				if (len < 64)
					MD5_Update(&opad_mctx[index], "\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C", 64-len);
			}
			memcpy(&ctx, &ipad_mctx[index], sizeof(ctx));
			MD5_Update(&ctx, cur_salt->salt, cur_salt->salt_length);
			MD5_Final(buf, &ctx);
			memcpy(&ctx, &opad_mctx[index], sizeof(ctx));
			MD5_Update(&ctx, buf, 16);
			MD5_Final((unsigned char*)(crypt_out[index]), &ctx);
		} else if (cur_salt->type == 2) {
			SHA_CTX ctx;

			if (new_keys[cur_salt->type]) {
				int i, len = strlen(saved_key[index]);
				unsigned char *p = (unsigned char*)saved_key[index];
				unsigned char pad[64];

				if (len > 64) {
					SHA1_Init(&ctx);
					SHA1_Update(&ctx, p, len);
					SHA1_Final(buf, &ctx);
					len = 20;
					p = buf;
				}
				for (i = 0; i < len; ++i) {
					pad[i] = p[i] ^ 0x36;
				}
				SHA1_Init(&ipad_ctx[index]);
				SHA1_Update(&ipad_ctx[index], pad, len);
				if (len < 64)
					SHA1_Update(&ipad_ctx[index], "\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36", 64-len);
				for (i = 0; i < len; ++i) {
					pad[i] = p[i] ^ 0x5C;
				}
				SHA1_Init(&opad_ctx[index]);
				SHA1_Update(&opad_ctx[index], pad, len);
				if (len < 64)
					SHA1_Update(&opad_ctx[index], "\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C", 64-len);
			}
			memcpy(&ctx, &ipad_ctx[index], sizeof(ctx));
			SHA1_Update(&ctx, cur_salt->salt, cur_salt->salt_length);
			SHA1_Final(buf, &ctx);
			memcpy(&ctx, &opad_ctx[index], sizeof(ctx));
			SHA1_Update(&ctx, buf, 20);
			// NOTE, this writes 20 bytes. That is why we had to bump up the size of each crypt_out[] value,
			// even though we only look at the first 16 bytes when comparing the saved binary.
			SHA1_Final((unsigned char*)(crypt_out[index]), &ctx);
		}
	}
	new_keys[cur_salt->type] = 0;
	return count;
}

static int cmp_all(void *binary, int count)
{
	int index = 0;

#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (((ARCH_WORD_32*)binary)[0] == crypt_out[index][0])
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

static void rsvp_set_key(char *key, int index)
{
	saved_len[index] = strlen(key);
	strncpy(saved_key[index], key, sizeof(saved_key[0]));
	// Workaround for self-test code not working as IRL
	new_keys[1] = new_keys[2] = 2;
}

static void clear_keys(void) {
	int i;

	for (i = 0; i <= MAX_TYPES; ++i)
		new_keys[i] = 1;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/*
 * report hash algorithm used for hmac as "tunable cost"
 */
static unsigned int rsvp_hash_type(void *salt)
{
	struct custom_salt *my_salt;

	my_salt = salt;
	return (unsigned int) my_salt->type;
}

struct fmt_main fmt_rsvp = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"hash algorithm used for hmac [1:MD5 2:SHA1]"
		},
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			rsvp_hash_type,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		rsvp_set_key,
		get_key,
		clear_keys,
		crypt_all,
		{
			fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif
tcp_md5_fmt_plug.c
/*
 * Cracker for TCP MD5 Signatures, http://www.ietf.org/rfc/rfc2385.txt
 *
 * This software is Copyright (c) 2013, Dhiru Kholia <dhiru [at] openwall.com>,
 * and it is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_tcpmd5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_tcpmd5);
#else

#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE               8192
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE               32768 // scaled K8-dual HT
#endif
#endif
#endif

#include "arch.h"
#include "md5.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"

#define FORMAT_LABEL            "tcp-md5"
#define FORMAT_NAME             "TCP MD5 Signatures, BGP"
#define FORMAT_TAG              "$tcpmd5$"
#define TAG_LENGTH              (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME          "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        0
// Linux Kernel says "#define TCP_MD5SIG_MAXKEYLEN 80"
#define PLAINTEXT_LENGTH        80
#define BINARY_SIZE             16
#define BINARY_ALIGN            sizeof(ARCH_WORD_32)
#define SALT_SIZE               sizeof(struct custom_salt)
#define SALT_ALIGN              sizeof(int)
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#define MAX_SALT                1024

static struct fmt_tests tests[] = {
	/* BGP TCP_MD5SIG hashes */
	{"$tcpmd5$c0a83814c0a838280006002800b3d10515f72762291b6878a010007300000000$eaf8d1f1da3f03c90b42709e9508fc73", "lolcats"},
	{"$tcpmd5$c0a83828c0a8381400060034d12100b36e73c1c300000000d002390800000000$9a75888344bf20488ebef3ee5b16dd2a", "longbutstilllamepassword"},
	{NULL}
};

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *saved_len;
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];

/* The "salt" is the raw TCP pseudo-header + segment data; RFC 2385 hashes
 * it followed by the connection key (the password). */
static struct custom_salt {
	int length;
	unsigned char salt[MAX_SALT]; // fixed length, but should be OK
} *cur_salt;

static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	/* NOTE(review): omp_get_num_threads() returns 1 outside a parallel
	 * region, so this scaling is a no-op; omp_get_max_threads() is what
	 * other formats (e.g. ethereum) use here — confirm against upstream. */
	int omp_t = omp_get_num_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_len));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
}

static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_len);
	MEM_FREE(saved_key);
}

/* Validate "$tcpmd5$<hex segment>$<32 lowercase hex digest>". */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q = NULL;
	int len;

	p = ciphertext;

	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	q = strrchr(ciphertext, '$');
	if (!q)
		return 0;
	q = q + 1;
	if ((q - p - 1) > MAX_SALT * 2)
		return 0;

	len = strspn(q, HEXCHARS_lc);
	if (len != BINARY_SIZE * 2 || len != strlen(q))
		return 0;
	if (strspn(p, HEXCHARS_lc) != q - p - 1)
		return 0;

	return 1;
}

/* Hex-decode the segment data into a static custom_salt. */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	int i, len;

	memset(&cs, 0, SALT_SIZE);
	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;
	len = (strrchr(ciphertext, '$') - ciphertext) / 2;

	for (i = 0; i < len; i++)
		cs.salt[i] = (atoi16[ARCH_INDEX(ciphertext[2 * i])] << 4) |
			atoi16[ARCH_INDEX(ciphertext[2 * i + 1])];
	cs.length = len;

	return &cs;
}

static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	p = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] =
			(atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* RFC 2385: digest = MD5(segment data || key). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		MD5_CTX ctx;

		MD5_Init(&ctx);
		MD5_Update(&ctx, cur_salt->salt, cur_salt->length);
		MD5_Update(&ctx, saved_key[index], saved_len[index]);
		MD5_Final((unsigned char*)crypt_out[index], &ctx);
	}
	return count;
}

static int cmp_all(void *binary, int count)
{
	int index = 0;

#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (((ARCH_WORD_32*)binary)[0] == crypt_out[index][0])
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

static void tcpmd5_set_key(char *key, int index)
{
	saved_len[index] = strlen(key);
	/* strncpy will pad with zeros, which is needed */
	strncpy(saved_key[index], key, sizeof(saved_key[0]));
}

static char *get_key(int index)
{
	return saved_key[index];
}

struct fmt_main fmt_tcpmd5 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		tcpmd5_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
8537.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "covariance.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < M; i++) for (j = 0; j < N; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_covariance(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m)) { int i, j, j1, j2; #pragma scop /* Determine mean of column vectors of input data matrix */ { #pragma omp parallel for schedule(static, 28) num_threads(28) for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Center the column vectors. */ #pragma omp parallel for schedule(static, 28) num_threads(28) for (i = 0; i < _PB_N; i++) { for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; } } /* Calculate the m * m covariance matrix. 
*/ #pragma omp parallel for schedule(static, 28) num_threads(28) for (j1 = 0; j1 < _PB_M; j1++) { for (j2 = j1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += data[i][j1] * data[i][j2]; symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_covariance (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); return 0; }
mixed_tentusscher_myo_epi_2004_S3_1.c
// Scenario 3-1 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt + Rd)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S3_1.h"

// Report the model's resting potential and number of state variables
// to the solver framework.
GET_CELL_MODEL_DATA(init_cell_model_data)
{
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

// Load per-cell initial conditions.  extra_data carries a 0/1 mask:
// 0 selects the myocardium parameter set, anything else the epicardium set.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
    static bool first_call = true;
    if(first_call)
    {
        print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
        first_call = false;
    }

    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    // Initial conditions for TenTusscher myocardium
    if (mapping[sv_id] == 0)
    {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;     // V;       millivolt
        sv[1] = 0.f;           //M
        sv[2] = 0.75;          //H
        sv[3] = 0.75f;         //J
        sv[4] = 0.f;           //Xr1
        sv[5] = 1.f;           //Xr2
        sv[6] = 0.f;           //Xs
        sv[7] = 1.f;           //S
        sv[8] = 0.f;           //R
        sv[9] = 0.f;           //D
        sv[10] = 1.f;          //F
        sv[11] = 1.f;          //FCa
        sv[12] = 1.f;          //G
        sv[13] = 0.0002;       //Cai
        sv[14] = 0.2f;         //CaSR
        sv[15] = 11.6f;        //Nai
        sv[16] = 138.3f;       //Ki
        */
        // Elnaz's steady-state initial conditions
        // NOTE(review): sv_sst[0] (V) is positive here while the epicardium
        // set below uses -86.64 mV; a missing minus sign looks likely —
        // confirm against the source of these fitted values.
        real sv_sst[]={86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
    // Initial conditions for TenTusscher epicardium
    else
    {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;     // V;       millivolt
        sv[1] = 0.f;           //M
        sv[2] = 0.75;          //H
        sv[3] = 0.75f;         //J
        sv[4] = 0.f;           //Xr1
        sv[5] = 1.f;           //Xr2
        sv[6] = 0.f;           //Xs
        sv[7] = 1.f;           //S
        sv[8] = 0.f;           //R
        sv[9] = 0.f;           //D
        sv[10] = 1.f;          //F
        sv[11] = 1.f;          //FCa
        sv[12] = 1.f;          //G
        sv[13] = 0.0002;       //Cai
        sv[14] = 0.2f;         //CaSR
        sv[15] = 11.6f;        //Nai
        sv[16] = 138.3f;       //Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.6404915792850,0.00127032163211322,0.781479753157976,0.781360816517016,0.000172969600594225,0.485842045427499,0.00292520813217015,0.999998371823369,1.91034113695031e-08,1.87293970187045e-05,0.999771221267447,1.00691525856031,0.999992103392003,4.93846276389813e-05,0.695256716079829,9.83880114557068,139.633017313049};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
}

// Advance every requested cell by num_steps explicit-Euler sub-steps,
// dispatching to the myocardium or epicardium RHS according to the mask.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    uint32_t sv_id;
    int i;

    // Each iteration touches only its own cell's state, so the loop is
    // embarrassingly parallel.
    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++)
    {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = (uint32_t )i;

        for (int j = 0; j < num_steps; ++j)
        {
            // NOTE(review): the mask is indexed with the loop index i,
            // while set_model_initial_conditions_cpu indexes it with the
            // global sv_id.  When cells_to_solve is non-NULL these differ —
            // confirm which indexing the mask array expects.
            if (mapping[i] == 0)
                solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
            else
                solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// One explicit-Euler / Rush-Larsen style step for a myocardium cell:
// RHS_cpu_myo returns the *updated* state in rDY, which is copied back.
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu_myo(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// TenTusscher 2004 myocardium right-hand side.  Unlike a plain dY/dt
// evaluation, this routine returns the already-advanced state for one
// time step dt (gates via exponential integration, concentrations via
// in-place Euler updates).
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
    // State variables
    real svolt = sv[0];
    real sm    = sv[1];
    real sh    = sv[2];
    real sj    = sv[3];
    real sxr1  = sv[4];
    real sxr2  = sv[5];
    real sxs   = sv[6];
    real ss    = sv[7];
    real sr    = sv[8];
    real sd    = sv[9];
    real sf    = sv[10];
    real sfca  = sv[11];
    real sg    = sv[12];
    real Cai   = sv[13];
    real CaSR  = sv[14];
    real Nai   = sv[15];
    real Ki    = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Myocardium cell
    real Gks=0.062;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Myocardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Fitted conductances for this scenario; they override the defaults
    // above (index order: GNa, GbNa, GCaL, GbCa, Gto, Gkr, Gks, GK1, GpK,
    // knak, knaca, Vmaxup, GpCa, arel, crel, Vleak).
    real parameters []={14.4701107547473,0.000162061905578968,0.000188488521383406,0.000572929459830166,0.335244898151308,0.119541023695594,0.248924317567785,5.19603253018384,0.0221271053316735,2.03169412747953,1099.72574265209,0.000483122952800270,0.478907546954075,0.0199668557152203,0.00562797831559110,3.64128969863145e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
//    real BufferFactorc;
//    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
//    real BufcKbufc=Bufc*Kbufc;
//    real Kbufcsquare=Kbufc*Kbufc;
//    real Kbufc2=2*Kbufc;
//    real BufsrKbufsr=Bufsr*Kbufsr;
//    const real Kbufsrsquare=Kbufsr*Kbufsr;
//    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents (Nernst/reversal potentials)
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    // SR release (arel/crel fitted above), leak, and SERCA uptake
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // Analytic buffering: solve the quadratic for the new free [Ca]SR
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    // Same quadratic form for cytosolic [Ca]
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;

    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // [!] Myocardium cell
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (exponential/Rush-Larsen integration)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // fCa and g gates may only *increase* their block above -37 mV
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}

// One explicit-Euler step for an epicardium cell (same scheme as the
// myocardium variant above).
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu_epi(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// TenTusscher 2004 epicardium right-hand side.  Differs from the
// myocardium version in Gks (0.245), the S-gate kinetics, and in using
// the published default conductances and SR release/leak constants
// instead of the fitted parameter array.
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
    // State variables
    real svolt = sv[0];
    real sm    = sv[1];
    real sh    = sv[2];
    real sj    = sv[3];
    real sxr1  = sv[4];
    real sxr2  = sv[5];
    real sxs   = sv[6];
    real ss    = sv[7];
    real sr    = sv[8];
    real sd    = sv[9];
    real sf    = sv[10];
    real sfca  = sv[11];
    real sg    = sv[12];
    real Cai   = sv[13];
    real CaSR  = sv[14];
    real Nai   = sv[15];
    real Ki    = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Epicardium cell
    real Gks=0.245;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Epicardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
//    real BufferFactorc;
//    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
//    real BufcKbufc=Bufc*Kbufc;
//    real Kbufcsquare=Kbufc*Kbufc;
//    real Kbufc2=2*Kbufc;
//    real BufsrKbufsr=Bufsr*Kbufsr;
//    const real Kbufsrsquare=Kbufsr*Kbufsr;
//    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents (Nernst/reversal potentials)
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    // SR release/leak with the published default constants
    A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    Irel=A*sd*sg;
    Ileak=0.00008f*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // Analytic buffering: solve the quadratic for the new free [Ca]SR
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    // Same quadratic form for cytosolic [Ca]
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;

    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (exponential/Rush-Larsen integration)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // fCa and g gates may only *increase* their block above -37 mV
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
GB_unaryop__abs_fp32_int64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_fp32_int64
// op(A') function:  GB_tran__abs_fp32_int64

// C type:   float
// A type:   int64_t
// cast:     float cij = (float) aij
// unaryop:  cij = fabsf (aij)

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = fabsf (x) ;

// casting
#define GB_CASTING(z, aij) \
    float z = (float) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;  \
    GB_OP (GB_CX (pC), z) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_FP32 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise: Cx [p] = fabsf ((float) Ax [p]) for p in [0, anz).
// Parallelized with a static OpenMP schedule over nthreads threads.
GrB_Info GB_unop__abs_fp32_int64
(
    float *Cx,          // Cx and Ax may be aliased
    int64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in GB_unaryop_transpose.c and is specialized by the
// macros defined above (phase 2 of the two-phase transpose).
GrB_Info GB_tran__abs_fp32_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
knapsack.c
/**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation; either version 2 of the License, or */ /* (at your option) any later version. */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. */ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program; if not, write to the Free Software */ /* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /**********************************************************************************************/ /* * Original code from the Cilk project * * Copyright (c) 2000 Massachusetts Institute of Technology * Copyright (c) 2000 Matteo Frigo */ #include <stdio.h> #include <stdlib.h> #include <limits.h> #include <string.h> #include "app-desc.h" #include "bots.h" int best_so_far; int number_of_tasks; #pragma omp threadprivate(number_of_tasks) int compare(struct item *a, struct item *b) { double c = ((double) a->value / a->weight) - ((double) b->value / b->weight); if (c > 0) return -1; if (c < 0) return 1; return 0; } int read_input(const char *filename, struct item *items, int *capacity, int *n) { int i; FILE *f; if (filename == NULL) filename = "\0"; f = fopen(filename, "r"); if (f == NULL) { fprintf(stderr, "open_input(\"%s\") failed\n", filename); return -1; } /* format of the input: #items capacity value1 weight1 
... */ fscanf(f, "%d", n); fscanf(f, "%d", capacity); for (i = 0; i < *n; ++i) fscanf(f, "%d %d", &items[i].value, &items[i].weight); fclose(f); /* sort the items on decreasing order of value/weight */ /* cilk2c is fascist in dealing with pointers, whence the ugly cast */ qsort(items, *n, sizeof(struct item), (int (*)(const void *, const void *)) compare); return 0; } /* * return the optimal solution for n items (first is e) and * capacity c. Value so far is v. */ #if defined(IF_CUTOFF) void knapsack_par(struct item *e, int c, int n, int v, int *sol, int l) { int with, without, best; double ub; number_of_tasks++; /* base case: full knapsack or no items */ if (c < 0) { *sol = INT_MIN; return; } /* feasible solution, with value v */ if (n == 0 || c == 0) { *sol = v; return; } ub = (double) v + c * e->value / e->weight; if (ub < best_so_far) { /* prune ! */ *sol = INT_MIN; return; } /* * compute the best solution without the current item in the knapsack */ #pragma omp task firstprivate(e,c,n,v,l) shared(without) if (l < bots_cutoff_value) knapsack_par(e + 1, c, n - 1, v, &without,l+1); /* compute the best solution with the current item in the knapsack */ #pragma omp task firstprivate(e,c,n,v,l) shared(with) if (l < bots_cutoff_value) knapsack_par(e + 1, c - e->weight, n - 1, v + e->value, &with,l+1); #pragma omp taskwait best = with > without ? with : without; /* * notice the race condition here. The program is still * correct, in the sense that the best solution so far * is at least best_so_far. Moreover best_so_far gets updated * when returning, so eventually it should get the right * value. The program is highly non-deterministic. 
*/ if (best > best_so_far) best_so_far = best; *sol = best; } #elif defined (MANUAL_CUTOFF) void knapsack_par(struct item *e, int c, int n, int v, int *sol, int l) { int with, without, best; double ub; number_of_tasks++; /* base case: full knapsack or no items */ if (c < 0) { *sol = INT_MIN; return; } /* feasible solution, with value v */ if (n == 0 || c == 0) { *sol = v; return; } ub = (double) v + c * e->value / e->weight; if (ub < best_so_far) { /* prune ! */ *sol = INT_MIN; return; } if (l < bots_cutoff_value) { /* compute the best solution without the current item in the knapsack */ #pragma omp task firstprivate(e,c,n,v,l) shared(without) knapsack_par(e + 1, c, n - 1, v, &without,l+1); /* compute the best solution with the current item in the knapsack */ #pragma omp task firstprivate(e,c,n,v,l) shared(with) knapsack_par(e + 1, c - e->weight, n - 1, v + e->value, &with,l+1); #pragma omp taskwait } else { /* compute the best solution without the current item in the knapsack */ knapsack_seq(e + 1, c, n - 1, v, &without); /* compute the best solution with the current item in the knapsack */ knapsack_seq(e + 1, c - e->weight, n - 1, v + e->value, &with); } best = with > without ? with : without; /* * notice the race condition here. The program is still * correct, in the sense that the best solution so far * is at least best_so_far. Moreover best_so_far gets updated * when returning, so eventually it should get the right * value. The program is highly non-deterministic. */ if (best > best_so_far) best_so_far = best; *sol = best; } #else void knapsack_par(struct item *e, int c, int n, int v, int *sol, int l) { int with, without, best; double ub; number_of_tasks++; /* base case: full knapsack or no items */ if (c < 0) { *sol = INT_MIN; return; } /* feasible solution, with value v */ if (n == 0 || c == 0) { *sol = v; return; } ub = (double) v + c * e->value / e->weight; if (ub < best_so_far) { /* prune ! 
*/ *sol = INT_MIN; return; } /* * compute the best solution without the current item in the knapsack */ #pragma omp task firstprivate(e,c,n,v,l) shared(without) knapsack_par(e + 1, c, n - 1, v, &without,l+1); /* compute the best solution with the current item in the knapsack */ #pragma omp task firstprivate(e,c,n,v,l) shared(with) knapsack_par(e + 1, c - e->weight, n - 1, v + e->value, &with,l+1); #pragma omp taskwait best = with > without ? with : without; /* * notice the race condition here. The program is still * correct, in the sense that the best solution so far * is at least best_so_far. Moreover best_so_far gets updated * when returning, so eventually it should get the right * value. The program is highly non-deterministic. */ if (best > best_so_far) best_so_far = best; *sol = best; } #endif void knapsack_seq(struct item *e, int c, int n, int v, int *sol) { int with, without, best; double ub; number_of_tasks++; /* base case: full knapsack or no items */ if (c < 0) { *sol = INT_MIN; return; } /* feasible solution, with value v */ if (n == 0 || c == 0) { *sol = v; return; } ub = (double) v + c * e->value / e->weight; if (ub < best_so_far) { /* prune ! */ *sol = INT_MIN; return; } /* * compute the best solution without the current item in the knapsack */ knapsack_seq(e + 1, c, n - 1, v, &without); /* compute the best solution with the current item in the knapsack */ knapsack_seq(e + 1, c - e->weight, n - 1, v + e->value, &with); best = with > without ? with : without; /* * notice the race condition here. The program is still * correct, in the sense that the best solution so far * is at least best_so_far. Moreover best_so_far gets updated * when returning, so eventually it should get the right * value. The program is highly non-deterministic. 
 */
	/* Publish the best branch value: update the global incumbent
	   (benign race, see comment above) and return it to the caller. */
	if (best > best_so_far) best_so_far = best;
	*sol = best;
}

/*
 * Parallel driver for the branch-and-bound knapsack search.
 *
 * e   - array of items (assumed pre-sorted by value/weight ratio for the
 *       upper-bound pruning in knapsack_par; NOTE(review): ordering is
 *       established outside this chunk - confirm at the call site)
 * c   - remaining knapsack capacity
 * n   - number of items
 * sol - out: best achievable total value
 *
 * Resets the global incumbent, spawns the root task from a single thread,
 * and accumulates per-thread task counters into bots_number_of_tasks.
 */
void knapsack_main_par (struct item *e, int c, int n, int *sol)
{
	/* Reset the global incumbent before the search starts. */
	best_so_far = INT_MIN;
#pragma omp parallel
{
	/* number_of_tasks is reset in every thread - presumably a
	   threadprivate counter; declaration is outside this chunk (TODO
	   confirm). */
	number_of_tasks = 0;
	/* One thread creates the root task; the rest pick up the tasks it
	   (recursively) spawns. */
#pragma omp single
#pragma omp task
	{
		knapsack_par(e, c, n, 0, sol, 0);
	}
	/* Serialize the accumulation of the per-thread task counters. */
#pragma omp critical
	bots_number_of_tasks += number_of_tasks;
}
	if (bots_verbose_mode) printf("Best value for parallel execution is %d\n\n", *sol);
}

/*
 * Sequential driver: same contract as knapsack_main_par, but runs the
 * plain recursive search. Used as the reference for knapsack_check.
 */
void knapsack_main_seq (struct item *e, int c, int n, int *sol)
{
	best_so_far = INT_MIN;
	number_of_tasks = 0;
	knapsack_seq(e, c, n, 0, sol);
	if (bots_verbose_mode) printf("Best value for sequential execution is %d\n\n", *sol);
}

/*
 * Verify the parallel result against the sequential reference.
 * Returns BOTS_RESULT_SUCCESSFUL iff both searches found the same optimum.
 */
int knapsack_check (int sol_seq, int sol_par)
{
	if (sol_seq == sol_par) return BOTS_RESULT_SUCCESSFUL;
	else return BOTS_RESULT_UNSUCCESSFUL;
}
kronecker.h
/** * @file kronecker.h * @author Nader KHAMMASSI - nader.khammassi@gmail.com * @date * @brief */ #include <iostream> #include <vector> #include <complex> #include <cstring> #include "qx/core/linalg.h" #ifdef USE_LIBDIVIDE #include <libdivide.h> #endif //#define println(x) std::cout << x << std::endl //#define print(x) std::cout << x /** * type definition */ //typedef std::complex<double> complex_t; typedef std::vector<qx::linalg::complex_t> row_t; typedef std::vector<row_t> matrix_t; typedef std::vector<qx::linalg::complex_t> vector_t; namespace qx { namespace linalg { /** * kronecker operator interface */ class kronecker_operator { public: virtual complex_t get(size_t i, size_t j) const = 0; virtual size_t size() const = 0; }; /** * identity */ class identity : public kronecker_operator { public: identity(size_t n) : n(n), zero(0.0), one(1.0) { } inline complex_t get(size_t i, size_t j) const { return (i==j ? one : zero); } size_t size() const { return n; } private: size_t n; const complex_t zero; const complex_t one; }; /** * unitary matrix */ class unitary_matrix : public kronecker_operator { public: unitary_matrix(size_t n, matrix_t& m) : n(n), m(m) { } inline complex_t get(size_t i, size_t j) const { return (m[i][j]); } size_t size() const { return n; } private: size_t n; matrix_t m; }; /** * kronecker */ class kronecker { public: kronecker(kronecker_operator * m1, kronecker_operator * m2, kronecker_operator * m3=NULL) : m1(m1), m2(m2), m3(m3) { } inline complex_t get(size_t i, size_t j) const { if (!m3) { size_t n1 = m1->size(); size_t n2 = m2->size(); complex_t c1 = m1->get(i/n2, j/n2); complex_t c2 = m2->get(i%n2, j%n2); // usleep((i+1)*500+(j+i)*500); println("k.get(" << i << "," << j << ") : " << c1 << " * " << c2 << "(n1=" << n1 << ", n2=" << n2 << ")"); return (c1*c2); } else { size_t n1 = m1->size(); size_t n2 = m2->size(); size_t n3 = m3->size(); complex_t c1 = m1->get(i/(n2*n3), j/(n2*n3)); complex_t c2 = m2->get((i/n3)%n2, (j/n3)%n2); complex_t c3 
= m3->get(i%n3, j%n3); return (c1*c2*c3); } } private: kronecker_operator * m1; kronecker_operator * m2; kronecker_operator * m3; }; /** * const */ const static complex_t __c_zero__ = 0.0; const static complex_t __c_one__ = 1.0f; const static complex_t i_diag[] = { 0.0, 1.0 }; #if 0 /** * kronecker */ class kronecker_ui { public: kronecker_ui(cmatrix_t& m, size_t nm, size_t ni) : m(m), nm(nm), ni(ni) { } inline complex_t get(size_t i, size_t j) { return m(i%nm,j%nm); /* complex_t& c1 = m(i%nm,j%nm); // U // usleep((i+1)*500+(j+i)*500); println("k_ui.get(" << i << "," << j << ") : " << c1 << " * " << c2 << "(nm=" << nm << ", ni=" << ni << ")"); return ((i/nm) == (j/nm) ? c1 : __c_zero__); */ } inline void get(size_t i, size_t j, complex_t& c) { complex_t& c1 = m(i%nm,j%nm); // U const complex_t& c2 = ((i/nm) == (j/nm) ? __c_one__ : __c_zero__); // I c = c1*c2; } cmatrix_t m; private: size_t nm; size_t ni; }; /** * kronecker */ class kronecker_iu { public: kronecker_iu(cmatrix_t& m, size_t nm, size_t ni) : m(m), nm(nm), ni(ni) { } inline complex_t get(size_t i, size_t j) { return m(i/ni,j/ni); /* complex_t& c1 = m(i/ni,j/ni); // U // usleep((i+1)*500+(j+i)*500); println("k_ui.get(" << i << "," << j << ") : " << c1 << " * " << c2 << "(nm=" << nm << ", ni=" << ni << ")"); return ((i%nm) == (j%nm) ? c1 : __c_zero__); */ } inline void get(size_t i, size_t j, complex_t& c) { complex_t& c1 = m(i/nm,j/nm); // U const complex_t& c2 = ((i%nm) == (j%nm) ? __c_one__ : __c_zero__); // I c = c1*c2; } cmatrix_t m; private: size_t nm; size_t ni; }; /** * kronecker_iui */ class kronecker_iui { public: kronecker_iui(cmatrix_t& m, size_t nm, size_t ni1, size_t ni2) : m(m), nm(nm), ni1(ni1), ni2(ni2) { } inline complex_t get(size_t i, size_t j) { return m((i/ni2)%nm,(j/ni2)%nm); /* complex_t& c = m((i/ni2)%nm,(j/ni2)%nm); // U bool i1 = (i/(nm*ni2)) == (j/(nm*ni2)); bool i2 = ((i%ni2) == (j%ni2)); return ((i1 && i2) ? 
c : __c_zero__); */ } inline void get(size_t i, size_t j, complex_t& c) { complex_t& c1 = m(i%nm,j%nm); // U const complex_t& c2 = ((i/nm) == (j/nm) ? __c_one__ : __c_zero__); // I c = c1*c2; } cmatrix_t m; private: size_t nm; size_t ni1; size_t ni2; }; #endif #define __mod_2(x) (x & 1) /** * kronecker */ class kronecker_ui { public: kronecker_ui(const complex_t * m, size_t nm, size_t ni) : m(m), nm(nm), ni(ni) { } inline complex_t get(size_t i, size_t j) { // return m(i%nm,j%nm); // return m[(i%2)*2+j%2]; return m[__mod_2(i)*2+__mod_2(j)]; } /* inline void get(size_t i, size_t j, complex_t& c) { complex_t& c1 = m(i%nm,j%nm); // U const complex_t& c2 = ((i/nm) == (j/nm) ? __c_one__ : __c_zero__); // I c = c1*c2; } */ const complex_t * m; private: size_t nm; size_t ni; }; /** * kronecker */ #if 0 class kronecker_iu { public: kronecker_iu(const complex_t * m, size_t nm, size_t ni) : m(m), nm(nm), ni(ni), fast_ni(ni) { } inline complex_t get(uint64_t i, uint64_t j) { // return m(i/ni,j/ni); // return m[(i/ni)*2+(j/ni)]; // return m[(i/fast_ni)*2+(j/fast_ni)]; return m[(2*i+j)/fast_ni]; } /* inline void get(size_t i, size_t j, complex_t& c) { complex_t& c1 = m(i/nm,j/nm); // U const complex_t& c2 = ((i%nm) == (j%nm) ? __c_one__ : __c_zero__); // I c = c1*c2; } */ const complex_t * m; private: uint64_t nm; uint64_t ni; libdivide::divider<uint64_t> fast_ni; }; #endif /** * kronecker */ class kronecker_iu { public: kronecker_iu(const complex_t * m, size_t nm, size_t ni) : m(m), nm(nm), ni(ni) { } inline complex_t get(size_t i, size_t j) { return m(i/ni,j/ni); /* complex_t& c1 = m(i/ni,j/ni); // U // usleep((i+1)*500+(j+i)*500); println("k_ui.get(" << i << "," << j << ") : " << c1 << " * " << c2 << "(nm=" << nm << ", ni=" << ni << ")"); return ((i%nm) == (j%nm) ? c1 : __c_zero__); */ } inline void get(size_t i, size_t j, complex_t& c) { complex_t& c1 = m(i/nm,j/nm); // U const complex_t& c2 = ((i%nm) == (j%nm) ? 
__c_one__ : __c_zero__); // I c = c1*c2; } cmatrix_t m; private: size_t nm; size_t ni; }; /** * kronecker_iui */ class kronecker_iui { public: kronecker_iui(const complex_t * m, size_t nm, size_t ni1, size_t ni2) : m(m), nm(nm), ni1(ni1), ni2(ni2) #ifdef USE_LIBDIVIDE , fast_ni2(ni2) #endif { } inline complex_t get(uint64_t i, uint64_t j) { // return m((i/ni2)%nm,(j/ni2)%nm); // return m[((i/ni2)%2)*2+(j/ni2)%2]; // return m[__mod_2(i/ni2)*2+__mod_2((j/ni2))]; #ifdef USE_LIBDIVIDE return m[__mod_2(i/fast_ni2)*2+__mod_2((j/fast_ni2))]; #else return m[__mod_2(i/ni2)*2+__mod_2((j/ni2))]; #endif } /* inline void get(size_t i, size_t j, complex_t& c) { complex_t& c1 = m(i%nm,j%nm); // U const complex_t& c2 = ((i/nm) == (j/nm) ? __c_one__ : __c_zero__); // I c = c1*c2; } */ const complex_t * m; private: uint64_t nm; uint64_t ni1; uint64_t ni2; #ifdef USE_LIBDIVIDE libdivide::divider<uint64_t> fast_ni2; #endif }; void printv(cvector_t& v) { print("[ "); for (std::size_t i=0; i<v.size(); ++i) print(v[i].re << ", "); //print(v[i].real() << ", "); println(" ]"); } void mulmv(kronecker& k, cvector_t& v, cvector_t& r) { #pragma omp parallel for schedule(static) for (int64_t i=0; i<(int64_t)v.size(); i++) { complex_t s = 0.0; for (std::size_t j=0; j<v.size(); j++) s += v[j]*(k.get(i,j)); r[i] = s; } } /** * to be tested for correctness */ void mulmv_(kronecker& k, cvector_t& v, cvector_t& r) { complex_t s = 0.; complex_t x = 0.; #pragma omp parallel for private(s,x) schedule(static) for (int64_t i=0; i<(int64_t)v.size(); i++) { s = 0; for (std::size_t j=0; j<v.size(); j++) { x = k.get(i,j); //if ((x.real() != 0) || (x.imag() != 0)) if ((x.re != 0) || (x.im != 0)) s += v[j]*x; } r[i] = s; } } void mulmv(kronecker& k, cvector_t& v, cvector_t& r, size_t block_ib, size_t block_ie, size_t block_jb, size_t block_je) { for (std::size_t i=block_ib; i<block_ie; i++) { complex_t s = r[i]; for (std::size_t j=block_jb; j<block_je; j++) s += v[j]*(k.get(i,j)); r[i] = s; } } } // namespace 
qx } // namespace linalg
pd_tgv_mex.c
#include <inttypes.h> #include <math.h> #include <omp.h> #include "mex.h" #define MIN_VERBOSE (4) #define CONVERGENCE_MOD (10) /* preprocessor can't compare floating point numbers: */ /* THETA0 = 1 -> THETA = 0.0 */ /* THETA0 = 0 -> THETA = THETA */ #define THETA0 (0) #define THETA (1.0) #define OI (1 << 1) /* unused */ #define BI (1 << 2) /* unused */ #define FI (1 << 3) /* unused */ #define CI (1 << 4) /* unused */ #define OJ (1 << 5) /* unused */ #define BJ (1 << 6) /* unused */ #define FJ (1 << 7) /* unused */ #define CJ (1 << 8) /* unused */ #define OK (1 << 9) /* unused */ #define BK (1 << 10) /* unused */ #define FK (1 << 11) /* unused */ #define CK (1 << 12) /* unused */ #define INSIDE (CI + CJ + CK) #define OUTSIDE (OI + OJ + OK) #define MAX(a,b) (((a)>(b))?(a):(b)) #define MIN(a,b) (((a)<(b))?(a):(b)) struct PD { /* primal */ void *chi; void *eta; void *w; void *chi_; void *eta_; void *w_; /* dual */ void *nu; void *p; void *q; /* misc */ void *f; void *m; void *mi; void *mb; /* unused */ void *chi0; void *eta0; /* TODO: w0 */ }; void mx_pdd(struct PD *mxpd, const double lam, const double alpha, const double *h, const double tol, const uint32_t maxiter, const uint32_t verbose); void mx_pdf(struct PD *mxpd, const double lam, const double alpha, const double *h, const double tol, const uint32_t maxiter, const uint32_t verbose); void update_duald(struct PD *pd, const double alpha0, const double alpha1, const double sigma, const double *h, const size_t *sz); void update_dualf(struct PD *pd, const double alpha0, const double alpha1, const double sigma, const double *h, const size_t *sz); void update_primald(struct PD *pd, const double tau, const double *h, const size_t *sz); void update_primalf(struct PD *pd, const double tau, const double *h, const size_t *sz); uint8_t convergence_checkd(double *nr1, double *nr2, const double *chi, const double *chi0, const double *eta, const double *eta0, const double tol, const size_t N); uint8_t convergence_checkf(double 
*nr1, double *nr2, const float *chi, const float *chi0, const float *eta, const float *eta0, const double tol, const size_t N); void init_bitmask(uint32_t *mb, uint8_t *mi, const uint8_t *m, const size_t *sz); void mx_init(struct PD *pd, const mxArray *mxf, const mxArray *mxm); void mx_cleanup(struct PD *pd); void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { if ((nrhs != 8) || (nlhs > 1)) { mexErrMsgTxt( "Usage: chi = pd_tgv_mex(d2f, mask, vsz, lam, alpha, tol, maxiter, verbose);" ); return; } const double *h = (const double *)mxGetData(prhs[2]); const double lam = (const double)mxGetScalar(prhs[3]); const double alp = (const double)mxGetScalar(prhs[4]); const double tol = (const double)mxGetScalar(prhs[5]); const uint32_t maxiter = (const uint32_t)mxGetScalar(prhs[6]); const uint32_t verbose = (const uint32_t)mxGetScalar(prhs[7]); struct PD mxpd; mx_init(&mxpd, prhs[0], prhs[1]); if (mxIsSingle(prhs[0])) { mx_pdf(&mxpd, lam, alp, h, tol, maxiter, verbose); } else { mx_pdd(&mxpd, lam, alp, h, tol, maxiter, verbose); } plhs[0] = mxDuplicateArray(mxpd.chi); mx_cleanup(&mxpd); return; } void mx_pdf(struct PD *mxpd, const double lam, const double alpha, const double *h, const double tol, const uint32_t maxiter, const uint32_t verbose) { uint32_t i, l, pmod; double nr1, nr2; const size_t N = (const size_t)mxGetNumberOfElements(mxpd->f); const size_t *sz = (const size_t *)mxGetDimensions(mxpd->f); struct PD pd; pd.chi = (float *)mxGetData(mxpd->chi); pd.eta = (float *)mxGetData(mxpd->eta); pd.w = (float *)mxGetData(mxpd->w); pd.chi_ = (float *)mxGetData(mxpd->chi_); pd.eta_ = (float *)mxGetData(mxpd->eta_); pd.w_ = (float *)mxGetData(mxpd->w_); pd.nu = (float *)mxGetData(mxpd->nu); pd.p = (float *)mxGetData(mxpd->p); pd.q = (float *)mxGetData(mxpd->q); pd.f = (float *)mxGetData(mxpd->f); pd.m = (uint8_t *)mxGetData(mxpd->m); pd.mi = (uint8_t *)mxGetData(mxpd->mi); pd.mb = (uint32_t *)mxGetData(mxpd->mb); pd.chi0 = (float 
*)mxGetData(mxpd->chi0); pd.eta0 = (float *)mxGetData(mxpd->eta0); float *f = pd.f; float *chi = pd.chi; float *eta = pd.eta; float *chi0 = pd.chi0; float *eta0 = pd.eta0; const double alpha1 = lam; const double alpha0 = alpha * alpha1; double gnrm = 4.0 * (1.0/(h[0]*h[0]) + 1.0/(h[1]*h[1]) + 1.0/(h[2]*h[2])); double wnrm = 6.0 * (1.0/(h[0]*h[0]) + 1.0/(h[1]*h[1]) + 2.0/(h[2]*h[2])) / 3.0; double K2 = gnrm*gnrm + wnrm*wnrm; const double tau = 1/sqrt(K2); const double sigma = 1/(tau*K2); #pragma omp parallel for private(l) schedule(static) for(l = 0; l < N; l += 1) { f[l] = ((float)sigma)*f[l]; } uint8_t flag = 0; nr1 = 1.0; nr2 = 1.0; pmod = maxiter / MIN(MAX(verbose, MIN_VERBOSE), maxiter); if (verbose) { mexPrintf("Iter\t\t||delta chi||\t||delta eta||\n"); } for(i = 0; i < maxiter; ++i) { if (((i+1) % CONVERGENCE_MOD) == 0) { #pragma omp parallel for private(l) schedule(static) for(l = 0; l < N; l += 1) { chi0[l] = chi[l]; eta0[l] = eta[l]; } } update_dualf(&pd, alpha0, alpha1, sigma, h, sz); update_primalf(&pd, tau, h, sz); if (((i+1) % CONVERGENCE_MOD) == 0) { flag = convergence_checkf(&nr1, &nr2, chi, chi0, eta, eta0, tol, N); } if (verbose && (i == 0 || i == maxiter-1 || ((i+1) % pmod) == 0 || flag)) { mexPrintf("%5d/%d\t%.3e\t%.3e\n", i+1, maxiter, nr1, nr2); } if (flag) { break; } } return; } void mx_pdd(struct PD *mxpd, const double lam, const double alpha, const double *h, const double tol, const uint32_t maxiter, const uint32_t verbose) { uint32_t i, l, pmod; double nr1, nr2; const size_t N = (const size_t)mxGetNumberOfElements(mxpd->f); const size_t *sz = (const size_t *)mxGetDimensions(mxpd->f); struct PD pd; pd.chi = (double *)mxGetData(mxpd->chi); pd.eta = (double *)mxGetData(mxpd->eta); pd.w = (double *)mxGetData(mxpd->w); pd.chi_ = (double *)mxGetData(mxpd->chi_); pd.eta_ = (double *)mxGetData(mxpd->eta_); pd.w_ = (double *)mxGetData(mxpd->w_); pd.nu = (double *)mxGetData(mxpd->nu); pd.p = (double *)mxGetData(mxpd->p); pd.q = (double 
*)mxGetData(mxpd->q); pd.f = (double *)mxGetData(mxpd->f); pd.m = (uint8_t *)mxGetData(mxpd->m); pd.mi = (uint8_t *)mxGetData(mxpd->mi); pd.mb = (uint32_t *)mxGetData(mxpd->mb); pd.chi0 = (double *)mxGetData(mxpd->chi0); pd.eta0 = (double *)mxGetData(mxpd->eta0); double *f = pd.f; double *chi = pd.chi; double *eta = pd.eta; double *chi0 = pd.chi0; double *eta0 = pd.eta0; const double alpha1 = lam; const double alpha0 = alpha * alpha1; double gnrm = 4.0 * (1.0/(h[0]*h[0]) + 1.0/(h[1]*h[1]) + 1.0/(h[2]*h[2])); double wnrm = 6.0 * (1.0/(h[0]*h[0]) + 1.0/(h[1]*h[1]) + 2.0/(h[2]*h[2])) / 3.0; double K2 = gnrm*gnrm + wnrm*wnrm; const double tau = 1/sqrt(K2); const double sigma = 1/(tau*K2); #pragma omp parallel for private(l) schedule(static) for(l = 0; l < N; l += 1) { f[l] = sigma*f[l]; } uint8_t flag = 0; nr1 = 1.0; nr2 = 1.0; pmod = maxiter / MIN(MAX(verbose, MIN_VERBOSE), maxiter); if (verbose) { mexPrintf("Iter\t\t||delta chi||\t||delta eta||\n"); } for(i = 0; i < maxiter; ++i) { if (((i+1) % CONVERGENCE_MOD) == 0) { #pragma omp parallel for private(l) schedule(static) for(l = 0; l < N; l += 1) { chi0[l] = chi[l]; eta0[l] = eta[l]; } } update_duald(&pd, alpha0, alpha1, sigma, h, sz); update_primald(&pd, tau, h, sz); if (((i+1) % CONVERGENCE_MOD) == 0) { flag = convergence_checkd(&nr1, &nr2, chi, chi0, eta, eta0, tol, N); } if (verbose && (i == 0 || i == maxiter-1 || ((i+1) % pmod) == 0 || flag)) { mexPrintf("%5d/%d\t%.3e\t%.3e\n", i+1, maxiter, nr1, nr2); } if (flag) { break; } } return; } /* * nu <- nu + sigma*(laplace(eta_) - wave(chi_) + f) * p <- P_{||.||_inf <= alpha1}(p + sigma*(grad(chi_) - w_)) * q <- P_{||.||_inf <= alpha0}(q + sigma*symgrad(w_)) */ void update_dualf(struct PD *pd, const double alpha0, const double alpha1, const double sigma, const double *h, const size_t *sz) { size_t i, j, k; size_t l; float x; float *nu = (float *)pd->nu; float *p = (float *)pd->p; float *q = (float *)pd->q; #if !THETA0 float *chi_ = (float *)pd->chi_; float *eta_ = 
(float *)pd->eta_; float *w_ = (float *)pd->w_; #else float *chi_ = (float *)pd->chi; float *eta_ = (float *)pd->eta; float *w_ = (float *)pd->w; #endif float *f = (float *)pd->f; uint8_t *m = (uint8_t *)pd->mi; const size_t nx = sz[0]; const size_t ny = sz[1]; const size_t nz = sz[2]; const size_t nxny = nx*ny; const size_t NX = nx-1; const size_t NY = nx*(ny-1); const size_t NZ = nxny*(nz-1); const float hx = (float)(sigma/h[0]); const float hy = (float)(sigma/h[1]); const float hz = (float)(sigma/h[2]); const float hx2 = (float)(0.5*sigma/h[0]); const float hy2 = (float)(0.5*sigma/h[1]); const float hz2 = (float)(0.5*sigma/h[2]); const float lhx = (float)(sigma/(h[0]*h[0])); const float lhy = (float)(sigma/(h[1]*h[1])); const float lhz = (float)(sigma/(h[2]*h[2])); const float lhh = -2.0f*(lhx+lhy+lhz); const float whx = (float)(-sigma/(3.0*h[0]*h[0])); const float why = (float)(-sigma/(3.0*h[1]*h[1])); const float whz = (float)( sigma*2.0/(3.0*h[2]*h[2])); const float whh = -2.0f*(whx+why+whz); const float alp0 = (float)alpha0; const float alp1 = (float)alpha1; const float alp02 = (float)(alpha0*alpha0); const float alp12 = (float)(alpha1*alpha1); const float sigma_ = (float)sigma; float *p1 = &p[0*(nxny*nz)]; float *p2 = &p[1*(nxny*nz)]; float *p3 = &p[2*(nxny*nz)]; float *w1_ = &w_[0*(nxny*nz)]; float *w2_ = &w_[1*(nxny*nz)]; float *w3_ = &w_[2*(nxny*nz)]; float *q1 = &q[0*(nxny*nz)]; float *q2 = &q[1*(nxny*nz)]; float *q3 = &q[2*(nxny*nz)]; float *q4 = &q[3*(nxny*nz)]; float *q5 = &q[4*(nxny*nz)]; float *q6 = &q[5*(nxny*nz)]; #pragma omp parallel for private(i,j,k,l,x) schedule(static) \ if (nxny*nz > 32*32*32) for(k = nxny; k < NZ; k += nxny) { for(j = nx; j < NY; j += nx) { l = 1 + j + k; for(i = 1; i < NX; ++i, ++l) { if (m[l] != 0) { nu[l] = nu[l] + ( /* laplace(eta_) */ (lhh*eta_[l] + lhx*(eta_[l-1] + eta_[l+1]) + lhy*(eta_[l-nx] + eta_[l+nx]) + lhz*(eta_[l-nxny] + eta_[l+nxny])) - /* - wave(chi_) */ (whh*chi_[l] + whx*(chi_[l-1] + chi_[l+1]) + 
why*(chi_[l-nx] + chi_[l+nx]) + whz*(chi_[l-nxny] + chi_[l+nxny])) + /* + f */ f[l] ); /* p + sigma*(grad(chi_) - w_) */ p1[l] = p1[l] + hx*(chi_[l+1]-chi_[l]) - sigma_*w1_[l]; p2[l] = p2[l] + hy*(chi_[l+nx]-chi_[l]) - sigma_*w2_[l]; p3[l] = p3[l] + hz*(chi_[l+nxny]-chi_[l]) - sigma_*w3_[l]; /* p <- P_{||.||_inf <= alpha1}(p) */ x = p1[l]*p1[l] + p2[l]*p2[l] + p3[l]*p3[l]; if (x > alp12) { x = alp1/sqrtf(x); p1[l] = p1[l] * x; p2[l] = p2[l] * x; p3[l] = p3[l] * x; } /* q + sigma*symgrad(w_) */ q1[l] = q1[l] + hx*(w1_[l+1]-w1_[l]); q2[l] = q2[l] + hx2*(w2_[l+1]-w2_[l]) + hy2*(w1_[l+nx]-w1_[l]); q3[l] = q3[l] + hx2*(w3_[l+1]-w3_[l]) + hz2*(w1_[l+nxny]-w1_[l]); q4[l] = q4[l] + hy*(w2_[l+nx]-w2_[l]); q5[l] = q5[l] + hy2*(w3_[l+nx]-w3_[l]) + hz2*(w2_[l+nxny]-w2_[l]); q6[l] = q6[l] + hz*(w3_[l+nxny]-w3_[l]); /* q <- P_{||.||_inf <= alpha0}(q) */ x = q1[l]*q1[l] + q4[l]*q4[l] + q6[l]*q6[l] + 2.0f*(q2[l]*q2[l] + q3[l]*q3[l] + q5[l]*q5[l]); if (x > alp02) { x = alp0/sqrtf(x); q1[l] = q1[l] * x; q2[l] = q2[l] * x; q3[l] = q3[l] * x; q4[l] = q4[l] * x; q5[l] = q5[l] * x; q6[l] = q6[l] * x; } } } } } return; } /* * nu <- nu + sigma*(laplace(eta_) - wave(chi_) + f) * p <- P_{||.||_inf <= alpha1}(p + sigma*(grad(chi_) - w_)) * q <- P_{||.||_inf <= alpha0}(q + sigma*symgrad(w_)) */ void update_duald(struct PD *pd, const double alpha0, const double alpha1, const double sigma, const double *h, const size_t *sz) { size_t i, j, k; size_t l; double x; double *nu = (double *)pd->nu; double *p = (double *)pd->p; double *q = (double *)pd->q; #if !THETA0 double *chi_ = (double *)pd->chi_; double *eta_ = (double *)pd->eta_; double *w_ = (double *)pd->w_; #else double *chi_ = (double *)pd->chi; double *eta_ = (double *)pd->eta; double *w_ = (double *)pd->w; #endif double *f = (double *)pd->f; uint8_t *m = (uint8_t *)pd->mi; const size_t nx = sz[0]; const size_t ny = sz[1]; const size_t nz = sz[2]; const size_t nxny = nx*ny; const size_t NX = nx-1; const size_t NY = nx*(ny-1); const size_t 
NZ = nxny*(nz-1); const double hx = sigma/h[0]; const double hy = sigma/h[1]; const double hz = sigma/h[2]; const double hx2 = 0.5*sigma/h[0]; const double hy2 = 0.5*sigma/h[1]; const double hz2 = 0.5*sigma/h[2]; const double lhx = sigma/(h[0]*h[0]); const double lhy = sigma/(h[1]*h[1]); const double lhz = sigma/(h[2]*h[2]); const double lhh = -2.0*(lhx+lhy+lhz); const double whx = -sigma/(3.0*h[0]*h[0]); const double why = -sigma/(3.0*h[1]*h[1]); const double whz = sigma*2.0/(3.0*h[2]*h[2]); const double whh = -2.0*(whx+why+whz); const double alp0 = alpha0; const double alp1 = alpha1; const double alp02 = alpha0*alpha0; const double alp12 = alpha1*alpha1; double *p1 = &p[0*(nxny*nz)]; double *p2 = &p[1*(nxny*nz)]; double *p3 = &p[2*(nxny*nz)]; double *w1_ = &w_[0*(nxny*nz)]; double *w2_ = &w_[1*(nxny*nz)]; double *w3_ = &w_[2*(nxny*nz)]; double *q1 = &q[0*(nxny*nz)]; double *q2 = &q[1*(nxny*nz)]; double *q3 = &q[2*(nxny*nz)]; double *q4 = &q[3*(nxny*nz)]; double *q5 = &q[4*(nxny*nz)]; double *q6 = &q[5*(nxny*nz)]; #pragma omp parallel for private(i,j,k,l,x) schedule(static) \ if (nxny*nz > 32*32*32) for(k = nxny; k < NZ; k += nxny) { for(j = nx; j < NY; j += nx) { l = 1 + j + k; for(i = 1; i < NX; ++i, ++l) { if (m[l] != 0) { nu[l] = nu[l] + ( /* laplace(eta_) */ (lhh*eta_[l] + lhx*(eta_[l-1] + eta_[l+1]) + lhy*(eta_[l-nx] + eta_[l+nx]) + lhz*(eta_[l-nxny] + eta_[l+nxny])) - /* - wave(chi_) */ (whh*chi_[l] + whx*(chi_[l-1] + chi_[l+1]) + why*(chi_[l-nx] + chi_[l+nx]) + whz*(chi_[l-nxny] + chi_[l+nxny])) + /* + f */ f[l] ); /* p + sigma*(grad(chi_) - w_) */ p1[l] = p1[l] + hx*(chi_[l+1]-chi_[l]) - sigma*w1_[l]; p2[l] = p2[l] + hy*(chi_[l+nx]-chi_[l]) - sigma*w2_[l]; p3[l] = p3[l] + hz*(chi_[l+nxny]-chi_[l]) - sigma*w3_[l]; /* p <- P_{||.||_inf <= alpha1}(p) */ x = p1[l]*p1[l] + p2[l]*p2[l] + p3[l]*p3[l]; if (x > alp12) { x = alp1/sqrt(x); p1[l] = p1[l] * x; p2[l] = p2[l] * x; p3[l] = p3[l] * x; } /* q + sigma*symgrad(w_) */ q1[l] = q1[l] + hx*(w1_[l+1]-w1_[l]); 
q2[l] = q2[l] + hx2*(w2_[l+1]-w2_[l]) + hy2*(w1_[l+nx]-w1_[l]); q3[l] = q3[l] + hx2*(w3_[l+1]-w3_[l]) + hz2*(w1_[l+nxny]-w1_[l]); q4[l] = q4[l] + hy*(w2_[l+nx]-w2_[l]); q5[l] = q5[l] + hy2*(w3_[l+nx]-w3_[l]) + hz2*(w2_[l+nxny]-w2_[l]); q6[l] = q6[l] + hz*(w3_[l+nxny]-w3_[l]); /* q <- P_{||.||_inf <= alpha0}(q) */ x = q1[l]*q1[l] + q4[l]*q4[l] + q6[l]*q6[l] + 2.0*(q2[l]*q2[l] + q3[l]*q3[l] + q5[l]*q5[l]); if (x > alp02) { x = alp0/sqrt(x); q1[l] = q1[l] * x; q2[l] = q2[l] * x; q3[l] = q3[l] * x; q4[l] = q4[l] * x; q5[l] = q5[l] * x; q6[l] = q6[l] * x; } } } } } return; } /* * eta <- (eta - tau*laplace(nu)) / (1+lambda*tau) * chi <- chi - tau*(-div(p) - wave(nu)) * w <- w - tau*(-p - div2(q)) * * eta_ <- eta_n+1 + THETA*(eta_n+1 - eta_n) * chi_ <- chi_n+1 + THETA*(chi_n+1 - chi_n) * w_ <- w_n+1 + THETA*( w_n+1 - w_n) */ void update_primalf(struct PD *pd, const double tau, const double *h, const size_t *sz) { size_t i, j, k; size_t l; float x, y, z; float *chi = (float *)pd->chi; float *eta = (float *)pd->eta; float *w = (float *)pd->w; #if !THETA0 float cn, en, w1n, w2n, w3n; float *chi_ = (float *)pd->chi_; float *eta_ = (float *)pd->eta_; float *w_ = (float *)pd->w_; #endif float *nu = (float *)pd->nu; float *p = (float *)pd->p; float *q = (float *)pd->q; uint8_t *m = (uint8_t *)pd->m; const size_t nx = sz[0]; const size_t ny = sz[1]; const size_t nz = sz[2]; const size_t nxny = nx*ny; const size_t NX = nx-1; const size_t NY = nx*(ny-1); const size_t NZ = nxny*(nz-1); const float hx = (float)(tau/h[0]); const float hy = (float)(tau/h[1]); const float hz = (float)(tau/h[2]); const float lhx = (float)(tau/(h[0]*h[0])); const float lhy = (float)(tau/(h[1]*h[1])); const float lhz = (float)(tau/(h[2]*h[2])); const float lhh = -2.0f*(lhx+lhy+lhz); const float whx = (float)(-tau/(3.0*h[0]*h[0])); const float why = (float)(-tau/(3.0*h[1]*h[1])); const float whz = (float)( tau*2.0/(3.0*h[2]*h[2])); const float whh = -2.0f*(whx+why+whz); const float tau_ = (float)tau; const 
float tau1 = (float)(1.0 / (1.0 + tau)); float *w1 = &w[0*(nxny*nz)]; float *w2 = &w[1*(nxny*nz)]; float *w3 = &w[2*(nxny*nz)]; #if !THETA0 float *w1_ = &w_[0*(nxny*nz)]; float *w2_ = &w_[1*(nxny*nz)]; float *w3_ = &w_[2*(nxny*nz)]; #endif float *p1 = &p[0*(nxny*nz)]; float *p2 = &p[1*(nxny*nz)]; float *p3 = &p[2*(nxny*nz)]; float *q1 = &q[0*(nxny*nz)]; float *q2 = &q[1*(nxny*nz)]; float *q3 = &q[2*(nxny*nz)]; float *q4 = &q[3*(nxny*nz)]; float *q5 = &q[4*(nxny*nz)]; float *q6 = &q[5*(nxny*nz)]; #pragma omp parallel for private(i,j,k,l,x,y,z) schedule(static) \ if (nxny*nz > 32*32*32) for(k = nxny; k < NZ; k += nxny) { for(j = nx; j < NY; j += nx) { l = 1 + j + k; for(i = 1; i < NX; ++i, ++l) { if (m[l] != 0) { /* eta_ <- eta_n+1 + theta*(eta_n+1 - eta_n) */ /* chi_ <- chi_n+1 + theta*(chi_n+1 - chi_n) */ /* w_ <- w_n+1 + theta*( w_n+1 - w_n) */ #if !THETA0 en = eta[l]; cn = chi[l]; w1n = w1[l]; w2n = w2[l]; w3n = w3[l]; #endif x = (nu[l-1] + nu[l+1]); y = (nu[l-nx] + nu[l+nx]); z = (nu[l-nxny] + nu[l+nxny]); /* eta_n+1 <- (eta_n - tau*laplace(nu)) / (1+tau) */ eta[l] = tau1 * ( eta[l] - lhh*nu[l] - lhx*x - lhy*y - lhz*z ); /* chi_n+1 <- chi_n - tau*(-div(p) - wave(nu)) */ chi[l] = chi[l] + ( /* div(p) */ hx*(p1[l]-p1[l-1]) + hy*(p2[l]-p2[l-nx]) + hz*(p3[l]-p3[l-nxny]) + /* wave(nu) */ (whh*nu[l] + whx*x + why*y + whz*z) ); /* w_n+1 <- w_n - tau*(-p - div2(q)) */ w1[l] = w1[l] + ( tau_*p1[l] + hx*( q1[l]-q1[l-1] + q2[l]-q2[l-nx] + q3[l]-q3[l-nxny] ) ); w2[l] = w2[l] + ( tau_*p2[l] + hy*( q2[l]-q2[l-1] + q4[l]-q4[l-nx] + q5[l]-q5[l-nxny] ) ); w3[l] = w3[l] + ( tau_*p3[l] + hz*( q3[l]-q3[l-1] + q5[l]-q5[l-nx] + q6[l]-q6[l-nxny] ) ); /* eta_ <- eta_n+1 + theta*(eta_n+1 - eta_n) */ /* chi_ <- chi_n+1 + theta*(chi_n+1 - chi_n) */ /* w_ <- w_n+1 + theta*( w_n+1 - w_n) */ #if !THETA0 eta_[l] = eta[l] + ((float)THETA)*(eta[l] - en); chi_[l] = chi[l] + ((float)THETA)*(chi[l] - cn); w1_[l] = w1[l] + ((float)THETA)*(w1[l] - w1n); w2_[l] = w2[l] + ((float)THETA)*(w2[l] - w2n); 
w3_[l] = w3[l] + ((float)THETA)*(w3[l] - w3n); #endif } } } } return; } /* * eta <- (eta - tau*laplace(nu)) / (1+lambda*tau) * chi <- chi - tau*(-div(p) - wave(nu)) * w <- w - tau*(-p - div2(q)) * * eta_ <- eta_n+1 + THETA*(eta_n+1 - eta_n) * chi_ <- chi_n+1 + THETA*(chi_n+1 - chi_n) * w_ <- w_n+1 + THETA*( w_n+1 - w_n) */ void update_primald(struct PD *pd, const double tau, const double *h, const size_t *sz) { size_t i, j, k; size_t l; double x, y, z; double *chi = (double *)pd->chi; double *eta = (double *)pd->eta; double *w = (double *)pd->w; #if !THETA0 double cn, en, w1n, w2n, w3n; double *chi_ = (double *)pd->chi_; double *eta_ = (double *)pd->eta_; double *w_ = (double *)pd->w_; #endif double *nu = (double *)pd->nu; double *p = (double *)pd->p; double *q = (double *)pd->q; uint8_t *m = (uint8_t *)pd->m; const size_t nx = sz[0]; const size_t ny = sz[1]; const size_t nz = sz[2]; const size_t nxny = nx*ny; const size_t NX = nx-1; const size_t NY = nx*(ny-1); const size_t NZ = nxny*(nz-1); const double hx = tau/h[0]; const double hy = tau/h[1]; const double hz = tau/h[2]; const double lhx = tau/(h[0]*h[0]); const double lhy = tau/(h[1]*h[1]); const double lhz = tau/(h[2]*h[2]); const double lhh = -2.0*(lhx+lhy+lhz); const double whx = -tau/(3.0*h[0]*h[0]); const double why = -tau/(3.0*h[1]*h[1]); const double whz = tau*2.0/(3.0*h[2]*h[2]); const double whh = -2.0*(whx+why+whz); const double tau1 = 1.0 / (1.0 + tau); double *w1 = &w[0*(nxny*nz)]; double *w2 = &w[1*(nxny*nz)]; double *w3 = &w[2*(nxny*nz)]; #if !THETA0 double *w1_ = &w_[0*(nxny*nz)]; double *w2_ = &w_[1*(nxny*nz)]; double *w3_ = &w_[2*(nxny*nz)]; #endif double *p1 = &p[0*(nxny*nz)]; double *p2 = &p[1*(nxny*nz)]; double *p3 = &p[2*(nxny*nz)]; double *q1 = &q[0*(nxny*nz)]; double *q2 = &q[1*(nxny*nz)]; double *q3 = &q[2*(nxny*nz)]; double *q4 = &q[3*(nxny*nz)]; double *q5 = &q[4*(nxny*nz)]; double *q6 = &q[5*(nxny*nz)]; #pragma omp parallel for private(i,j,k,l,x,y,z) schedule(static) \ if (nxny*nz > 
32*32*32) for(k = nxny; k < NZ; k += nxny) { for(j = nx; j < NY; j += nx) { l = 1 + j + k; for(i = 1; i < NX; ++i, ++l) { if (m[l] != 0) { /* eta_ <- eta_n+1 + theta*(eta_n+1 - eta_n) */ /* chi_ <- chi_n+1 + theta*(chi_n+1 - chi_n) */ /* w_ <- w_n+1 + theta*( w_n+1 - w_n) */ #if !THETA0 en = eta[l]; cn = chi[l]; w1n = w1[l]; w2n = w2[l]; w3n = w3[l]; #endif x = (nu[l-1] + nu[l+1]); y = (nu[l-nx] + nu[l+nx]); z = (nu[l-nxny] + nu[l+nxny]); /* eta_n+1 <- (eta_n - tau*laplace(nu)) / (1+tau) */ eta[l] = tau1 * ( eta[l] - lhh*nu[l] - lhx*x - lhy*y - lhz*z ); /* chi_n+1 <- chi_n - tau*(-div(p) - wave(nu)) */ chi[l] = chi[l] + ( /* div(p) */ hx*(p1[l]-p1[l-1]) + hy*(p2[l]-p2[l-nx]) + hz*(p3[l]-p3[l-nxny]) + /* wave(nu) */ (whh*nu[l] + whx*x + why*y + whz*z) ); /* w_n+1 <- w_n - tau*(-p - div2(q)) */ w1[l] = w1[l] + ( tau*p1[l] + hx*( q1[l]-q1[l-1] + q2[l]-q2[l-nx] + q3[l]-q3[l-nxny] ) ); w2[l] = w2[l] + ( tau*p2[l] + hy*( q2[l]-q2[l-1] + q4[l]-q4[l-nx] + q5[l]-q5[l-nxny] ) ); w3[l] = w3[l] + ( tau*p3[l] + hz*( q3[l]-q3[l-1] + q5[l]-q5[l-nx] + q6[l]-q6[l-nxny] ) ); /* eta_ <- eta_n+1 + theta*(eta_n+1 - eta_n) */ /* chi_ <- chi_n+1 + theta*(chi_n+1 - chi_n) */ /* w_ <- w_n+1 + theta*( w_n+1 - w_n) */ #if !THETA0 eta_[l] = eta[l] + ((double)THETA)*(eta[l] - en); chi_[l] = chi[l] + ((double)THETA)*(chi[l] - cn); w1_[l] = w1[l] + ((double)THETA)*(w1[l] - w1n); w2_[l] = w2[l] + ((double)THETA)*(w2[l] - w2n); w3_[l] = w3[l] + ((double)THETA)*(w3[l] - w3n); #endif } } } } return; } uint8_t convergence_checkd(double *nr1, double *nr2, const double *chi, const double *chi0, const double *eta, const double *eta0, const double tol, const size_t N) { size_t l; double n1 = 0.0; double n2 = 0.0; double d1 = 0.0; double d2 = 0.0; /* can do naive summation for this */ /* pairwise summation norm is with the multigrid stuff */ #pragma omp parallel for private(l) \ reduction(+: n1, n2, d1, d2) schedule(static) for(l = 0; l < N; l += 1) { d1 += chi[l]*chi[l]; d2 += eta[l]*eta[l]; n1 += 
(chi[l]-chi0[l]) * (chi[l]-chi0[l]); n2 += (eta[l]-eta0[l]) * (eta[l]-eta0[l]); } n1 = sqrt(n1/d1); n2 = sqrt(n2/d2); *nr1 = n1; *nr2 = n2; return (uint8_t)((n1 < tol) && (n2 < 2.0*tol)); } uint8_t convergence_checkf(double *nr1, double *nr2, const float *chi, const float *chi0, const float *eta, const float *eta0, const double tol, const size_t N) { size_t l; double n1 = 0.0; double n2 = 0.0; double d1 = 0.0; double d2 = 0.0; /* can do naive summation for this */ /* pairwise summation norm is with the multigrid stuff */ #pragma omp parallel for private(l) \ reduction(+: n1, n2, d1, d2) schedule(static) for(l = 0; l < N; l += 1) { d1 += ((double)chi[l]) * ((double)chi[l]); d2 += ((double)eta[l]) * ((double)eta[l]); n1 += ((double)(chi[l]-chi0[l])) * ((double)chi[l]-chi0[l]); n2 += ((double)(eta[l]-eta0[l])) * ((double)eta[l]-eta0[l]); } n1 = sqrt(n1/d1); n2 = sqrt(n2/d2); *nr1 = n1; *nr2 = n2; return (uint8_t)((n1 < tol) && (n2 < 2.0*tol)); } void init_bitmask(uint32_t *mb, uint8_t *mi, const uint8_t *m, const size_t *sz) { size_t i, j, k; size_t l; uint8_t b, f, o; const size_t nx = sz[0]; const size_t ny = sz[1]; const size_t nz = sz[2]; const size_t nxny = nx*ny; const size_t NX = nx-1; const size_t NY = nx*(ny-1); const size_t NZ = nxny*(nz-1); /* * 0 b f c * -------------- * i 1 2 3 4 * j 5 6 7 8 * k 9 10 11 12 */ #pragma omp parallel for private(i,j,k,l,b,f,o) schedule(static) \ if (nxny*nz > 32*32*32) for(k = 0; k <= NZ; k += nxny) { for(j = 0; j <= NY; j += nx) { l = j + k; for(i = 0; i <= NX; ++i, ++l) { o = 3; f = (i < NX) && m[l+1]; mb[l] |= f ? (1 << o) : 0; o = 2; b = (i > 0) && m[l-1]; mb[l] |= b ? (1 << o) : 0; o = 1; mb[l] |= (1 << (o + 2*f + b)); o = 7; f = (j < NY) && m[l+nx]; mb[l] |= f ? (1 << o) : 0; o = 6; b = (j > 0) && m[l-nx]; mb[l] |= b ? (1 << o) : 0; o = 5; mb[l] |= (1 << (o + 2*f + b)); o = 11; f = (k < NZ) && m[l+nxny]; mb[l] |= f ? (1 << o) : 0; o = 10; b = (k > 0) && m[l-nxny]; mb[l] |= b ? 
(1 << o) : 0; o = 9; mb[l] |= (1 << (o + 2*f + b)); mb[l] = ((mb[l] & OUTSIDE) == OUTSIDE) ? 0 : mb[l]; mi[l] = ((mb[l] & INSIDE) == INSIDE); } } } return; } void mx_init(struct PD *pd, const mxArray *mxf, const mxArray *mxm) { mxClassID T = mxGetClassID(mxf); const size_t *sz = (const size_t *)mxGetDimensions(mxf); const size_t sz3[4] = {sz[0], sz[1], sz[2], 3}; const size_t sz6[4] = {sz[0], sz[1], sz[2], 6}; pd->chi = mxCreateNumericArray(3, sz, T, mxREAL); pd->eta = mxCreateNumericArray(3, sz, T, mxREAL); pd->w = mxCreateNumericArray(4, sz3, T, mxREAL); #if !THETA0 pd->chi_ = mxCreateNumericArray(3, sz, T, mxREAL); pd->eta_ = mxCreateNumericArray(3, sz, T, mxREAL); pd->w_ = mxCreateNumericArray(4, sz3, T, mxREAL); #else pd->chi_ = NULL; pd->eta_ = NULL; pd->w_ = NULL; #endif pd->nu = mxCreateNumericArray(3, sz, T, mxREAL); pd->p = mxCreateNumericArray(4, sz3, T, mxREAL); pd->q = mxCreateNumericArray(4, sz6, T, mxREAL); pd->f = mxDuplicateArray(mxf); pd->m = mxDuplicateArray(mxm); pd->mi = mxCreateNumericArray(3, sz, mxUINT8_CLASS, mxREAL); pd->mb = mxCreateNumericArray(3, sz, mxUINT32_CLASS, mxREAL); pd->chi0 = mxCreateNumericArray(3, sz, T, mxREAL); pd->eta0 = mxCreateNumericArray(3, sz, T, mxREAL); uint8_t *m = (uint8_t *)mxGetData(pd->m); uint8_t *mi = (uint8_t *)mxGetData(pd->mi); uint32_t *mb = (uint32_t *)mxGetData(pd->mb); init_bitmask(mb, mi, m, sz); return; } void mx_cleanup(struct PD *pd) { if (NULL != pd->chi) { mxDestroyArray(pd->chi); pd->chi = NULL; } if (NULL != pd->eta) { mxDestroyArray(pd->eta); pd->eta = NULL; } if (NULL != pd->w) { mxDestroyArray(pd->w); pd->w = NULL; } if (NULL != pd->chi_) { mxDestroyArray(pd->chi_); pd->chi_ = NULL; } if (NULL != pd->eta_) { mxDestroyArray(pd->eta_); pd->eta_ = NULL; } if (NULL != pd->w_) { mxDestroyArray(pd->w_); pd->w_ = NULL; } if (NULL != pd->nu) { mxDestroyArray(pd->nu); pd->nu = NULL; } if (NULL != pd->p) { mxDestroyArray(pd->p); pd->p = NULL; } if (NULL != pd->q) { mxDestroyArray(pd->q); pd->q = 
NULL; } if (NULL != pd->f) { mxDestroyArray(pd->f); pd->f = NULL; } if (NULL != pd->m) { mxDestroyArray(pd->m); pd->m = NULL; } if (NULL != pd->mi) { mxDestroyArray(pd->mi); pd->mi = NULL; } if (NULL != pd->mb) { mxDestroyArray(pd->mb); pd->mb = NULL; } if (NULL != pd->chi0) { mxDestroyArray(pd->chi0); pd->chi0 = NULL; } if (NULL != pd->eta0) { mxDestroyArray(pd->eta0); pd->eta0 = NULL; } return; }
hello3.c
/*
 * hello3.c — OpenMP "hello world": a team of up to 4 threads splits an
 * 8-iteration loop and each thread reports its id and iteration number.
 *
 * Fixes relative to the original:
 *  - The loop iteration variable of a `parallel for` is predetermined
 *    private in OpenMP; listing it in a shared() clause is non-conforming
 *    and rejected by GCC, so `shared(k)` is dropped and `k` is declared
 *    in the for-statement instead.
 *  - `nthreads` was declared and made private but never read or written;
 *    removed along with its private() entry.
 *  - main now returns 0 explicitly.
 */
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
    (void)argc;   /* unused */
    (void)argv;   /* unused */

    omp_set_num_threads(4);   /* request a team of 4 threads */

    int tid;

    /* Fork a team of threads, giving each its own copy of tid; the loop
     * counter k is implicitly private per the OpenMP rules. */
    #pragma omp parallel for private(tid)
    for (int k = 0; k < 8; k++) {
        /* Obtain thread number */
        tid = omp_get_thread_num();
        printf("Hello World from thread = %d and %d\n", tid, k);
    }

    /* All threads join master thread and disband */
    return 0;
}
bfs_replicated.c
/* Copyright (C) 2010 The Trustees of Indiana University.                  */
/*                                                                         */
/* Use, modification and distribution is subject to the Boost Software     */
/* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */
/* http://www.boost.org/LICENSE_1_0.txt)                                   */
/*                                                                         */
/*  Authors: Jeremiah Willcock                                             */
/*           Andrew Lumsdaine                                              */

/* Replicated-frontier BFS: every rank keeps a full copy of the global
 * input frontier bitmap, refreshed once per level with two MPI_Allgather
 * calls, so the traversal itself needs no point-to-point messaging.
 * (Helper types/macros — oned_csr_graph, MUL_SIZE, VERTEX_OWNER,
 * VERTEX_LOCAL, VERTEX_TO_GLOBAL, xmalloc, lg_int64_t, rank — come from
 * common.h/oned_csr.h; semantics inferred from usage, not visible here.) */

#define _GNU_SOURCE
#include "common.h"
#include "oned_csr.h"
#include "onesided.h"
#include <mpi.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <limits.h>
#include <assert.h>

/* The graph in 1-D-distributed compressed-sparse-row form. */
static oned_csr_graph g;
/* BFS workspace, (re)allocated per run by allocate_memory():
 *  - g_in_queue / g_in_queue_summary: replicated global input frontier,
 *    plus a one-bit-per-word summary so runs of empty words are skipped
 *    with a single load;
 *  - g_out_queue / g_out_queue_summary: this rank's output frontier;
 *  - g_visited: this rank's visited bitmap. */
static unsigned long* g_in_queue;
static unsigned long* g_in_queue_summary;
static unsigned long* g_out_queue;
static unsigned long* g_out_queue_summary;
static unsigned long* g_visited;

/* Bits in one unsigned long, and queue bits covered by one summary word. */
const int ulong_bits = sizeof(unsigned long) * CHAR_BIT;
const int ulong_bits_squared = sizeof(unsigned long) * sizeof(unsigned long) * CHAR_BIT * CHAR_BIT;

/* Allocate all bitmaps.  Per-rank sizes are rounded up so one summary bit
 * covers exactly ulong_bits queue words; MUL_SIZE scales a per-rank size
 * to the replicated global size (presumably multiply-by-rank-count —
 * defined in common.h, confirm there). */
static void allocate_memory(void) {
  int64_t maxlocalverts = g.max_nlocalverts;
  int64_t local_queue_summary_size = (maxlocalverts + ulong_bits_squared - 1) / ulong_bits_squared;
  int64_t local_queue_size = local_queue_summary_size * ulong_bits;
  int64_t global_queue_summary_size = MUL_SIZE(local_queue_summary_size);
  int64_t global_queue_size = MUL_SIZE(local_queue_size);
  g_in_queue = (unsigned long*)xmalloc(global_queue_size * sizeof(unsigned long));
  g_in_queue_summary = (unsigned long*)xmalloc(global_queue_summary_size * sizeof(unsigned long));
  g_out_queue = (unsigned long*)xmalloc(local_queue_size * sizeof(unsigned long));
  g_out_queue_summary = (unsigned long*)xmalloc(local_queue_summary_size * sizeof(unsigned long));
  g_visited = (unsigned long*)xmalloc(local_queue_size * sizeof(unsigned long));
}

/* Release the workspace; pointers are nulled so a second call is a no-op. */
static void deallocate_memory(void) {
  free(g_in_queue); g_in_queue = NULL;
  free(g_in_queue_summary); g_in_queue_summary = NULL;
  free(g_out_queue); g_out_queue = NULL;
  free(g_out_queue_summary); g_out_queue_summary = NULL;
  free(g_visited); g_visited = NULL;
}

/* Build the CSR graph, then trial-allocate and immediately free the BFS
 * workspace so an out-of-memory condition surfaces here, not mid-search. */
void make_graph_data_structure(const tuple_graph* const tg) {
  convert_graph_to_oned_csr(tg, &g);
  allocate_memory(); /* Make sure all of the space is available */
  deallocate_memory();
}

void free_graph_data_structure(void) {
  free_oned_csr_graph(&g);
  /* deallocate_memory(); */
}

/* pred[] entries carry the BFS level in their high 16 bits (see run_bfs),
 * so this implementation also produces a depth map. */
int bfs_writes_depth_map(void) {return 1;}

/* This version is the traditional level-synchronized BFS using two queues.  A
 * bitmap is used to indicate which vertices have been visited.  Messages are
 * sent and processed asynchronously throughout the code to hopefully overlap
 * communication with computation.
 * (NOTE(review): in this replicated variant the per-level communication is
 * actually one MPI_Allreduce plus two MPI_Allgathers — see the loop tail.) */
void run_bfs(int64_t root, int64_t* pred) {
  allocate_memory();
  const ptrdiff_t nlocalverts = g.nlocalverts;
  const size_t* const restrict rowstarts = g.rowstarts;
  const int64_t* const restrict column = g.column;
  int64_t maxlocalverts = g.max_nlocalverts;

  /* Set up the visited bitmap. */
  const int ulong_bits = sizeof(unsigned long) * CHAR_BIT;      /* shadows the file-scope constant */
  const int ulong_bits_squared = ulong_bits * ulong_bits;
  int64_t local_queue_summary_size = (maxlocalverts + ulong_bits_squared - 1) / ulong_bits_squared;
  int64_t local_queue_size = local_queue_summary_size * ulong_bits;
  int lg_local_queue_size = lg_int64_t(local_queue_size);
  int64_t global_queue_summary_size = MUL_SIZE(local_queue_summary_size);
  int64_t global_queue_size = MUL_SIZE(local_queue_size);

  /* Map a global vertex id to its bit index in the replicated frontier:
   * the owner rank selects a per-rank segment, the local id the bit inside
   * it.  Note `*` binds tighter than `|`, so this is
   * ((owner << lg_local_queue_size) * ulong_bits) | local — assumes
   * local_queue_size is a power of two (hence lg_int64_t); confirm. */
#define SWIZZLE_VERTEX(c) ((VERTEX_OWNER(c) << lg_local_queue_size) * ulong_bits | VERTEX_LOCAL(c))
#if 0
  int64_t* restrict column_swizzled = (int64_t*)xmalloc(nlocaledges * sizeof(int64_t));
  {
    size_t i;
    for (i = 0; i < nlocaledges; ++i) {
      int64_t c = column[i];
      column_swizzled[i] = SWIZZLE_VERTEX(c);
    }
  }
#endif

  unsigned long* restrict in_queue = g_in_queue;
  memset(in_queue, 0, global_queue_size * sizeof(unsigned long));
  unsigned long* restrict in_queue_summary = g_in_queue_summary;
  memset(in_queue_summary, 0, global_queue_summary_size * sizeof(unsigned long));
  unsigned long* restrict out_queue = g_out_queue;
  unsigned long* restrict out_queue_summary = g_out_queue_summary;
  unsigned long* restrict visited = g_visited;
  memset(visited, 0, local_queue_size * sizeof(unsigned long));

  /* SET_IN takes a raw vertex id (it swizzles internally) and sets both
   * the frontier bit and its summary bit. */
#define SET_IN(v) do {int64_t vs = SWIZZLE_VERTEX(v); size_t word_idx = vs / ulong_bits; int bit_idx = vs % ulong_bits; unsigned long mask = (1UL << bit_idx); in_queue_summary[word_idx / ulong_bits] |= (1UL << (word_idx % ulong_bits)); in_queue[word_idx] |= mask;} while (0)
  /* TEST_IN takes an already-swizzled bit index; the summary word is
   * consulted first so empty regions cost a single load. */
#define TEST_IN(vs) (((in_queue_summary[vs / ulong_bits / ulong_bits] & (1UL << ((vs / ulong_bits) % ulong_bits))) != 0) && ((in_queue[vs / ulong_bits] & (1UL << (vs % ulong_bits))) != 0))
#define TEST_VISITED_LOCAL(v) ((visited[(v) / ulong_bits] & (1UL << ((v) % ulong_bits))) != 0)
// #define SET_VISITED_LOCAL(v) do {size_t word_idx = (v) / ulong_bits; int bit_idx = (v) % ulong_bits; unsigned long mask = (1UL << bit_idx); __sync_fetch_and_or(&visited[word_idx], mask); __sync_fetch_and_or(&out_queue[word_idx], mask);} while (0)
  /* Non-atomic variant of the (commented-out) atomic version above: the
   * parallel scan below strides by whole ulong_bits chunks, so each
   * visited/out_queue word is only ever written by one thread per level. */
#define SET_VISITED_LOCAL(v) do {size_t word_idx = (v) / ulong_bits; int bit_idx = (v) % ulong_bits; unsigned long mask = (1UL << bit_idx); visited[word_idx] |= mask; out_queue[word_idx] |= mask;} while (0)

  SET_IN(root);
  /* pred[] starts at -1 everywhere, meaning "not yet visited". */
  {ptrdiff_t i; _Pragma("omp parallel for schedule(static)") for (i = 0; i < nlocalverts; ++i) pred[i] = -1;}
  if (VERTEX_OWNER(root) == rank) {
    /* The root is its own parent (level 0 encoded implicitly as zero). */
    pred[VERTEX_LOCAL(root)] = root;
    SET_VISITED_LOCAL(VERTEX_LOCAL(root));
  }
  uint16_t cur_level = 0;
  while (1) {
    ++cur_level;
#if 0
    if (rank == 0) fprintf(stderr, "BFS level %" PRIu16 "\n", cur_level);
#endif
    memset(out_queue, 0, local_queue_size * sizeof(unsigned long));
    // memset(out_queue_summary, 0, local_queue_summary_size * sizeof(unsigned long));
    ptrdiff_t i, ii;
#if 0
#pragma omp parallel for schedule(static)
    for (i = 0; i < global_queue_summary_size; ++i) {
      unsigned long val = 0UL;
      int j;
      unsigned long mask = 1UL;
      for (j = 0; j < ulong_bits; ++j, mask <<= 1) {
        if (in_queue[i * ulong_bits + j]) val |= mask;
      }
      in_queue_summary[i] = val;
    }
#endif
    unsigned long not_done = 0;

    /* "Bottom-up" scan: each still-unvisited local vertex searches its
     * adjacency list for any neighbor present in the input frontier.
     * Chunking ii by ulong_bits keeps every bitmap word thread-private. */
#pragma omp parallel for schedule(static) reduction(|:not_done)
    for (ii = 0; ii < nlocalverts; ii += ulong_bits) {
      size_t i, i_end = ii + ulong_bits;   /* NOTE: this i shadows the outer one */
      if (i_end > nlocalverts) i_end = nlocalverts;
      for (i = ii; i < i_end; ++i) {
        if (!TEST_VISITED_LOCAL(i)) {
          size_t j, j_end = rowstarts[i + 1];
          for (j = rowstarts[i]; j < j_end; ++j) {
            int64_t v1 = column[j];
            int64_t v1_swizzled = SWIZZLE_VERTEX(v1);
            if (TEST_IN(v1_swizzled)) {
              /* Parent id in the low 48 bits, BFS level in the high 16. */
              pred[i] = (v1 & INT64_C(0xFFFFFFFFFFFF)) | ((int64_t)cur_level << 48);
              not_done |= 1;
              SET_VISITED_LOCAL(i);
              break;
            }
          }
        }
      }
    }
#if 1
    /* Rebuild the out-queue summary and fold the newly discovered bits
     * into the visited bitmap (safe: previous loop has completed). */
#pragma omp parallel for schedule(static)
    for (i = 0; i < local_queue_summary_size; ++i) {
      unsigned long val = 0UL;
      int j;
      unsigned long mask = 1UL;
      for (j = 0; j < ulong_bits; ++j, mask <<= 1) {
        unsigned long full_val = out_queue[i * ulong_bits + j];
        visited[i * ulong_bits + j] |= full_val;
        if (full_val) val |= mask;
      }
      out_queue_summary[i] = val;
      // not_done |= val;
    }
#endif
    /* Stop when no rank found a new vertex this level; otherwise replicate
     * every rank's out-queue (and summary) into every rank's in-queue. */
    MPI_Allreduce(MPI_IN_PLACE, &not_done, 1, MPI_UNSIGNED_LONG, MPI_BOR, MPI_COMM_WORLD);
    if (not_done == 0) break;
    MPI_Allgather(out_queue, local_queue_size, MPI_UNSIGNED_LONG, in_queue, local_queue_size, MPI_UNSIGNED_LONG, MPI_COMM_WORLD);
    MPI_Allgather(out_queue_summary, local_queue_summary_size, MPI_UNSIGNED_LONG, in_queue_summary, local_queue_summary_size, MPI_UNSIGNED_LONG, MPI_COMM_WORLD);
  }
  deallocate_memory();
}

/* For the validator: split each global vertex id into (owner rank,
 * local index) pairs, element-wise. */
void get_vertex_distribution_for_pred(size_t count, const int64_t* vertex_p, int* owner_p, size_t* local_p) {
  const int64_t* restrict vertex = vertex_p;
  int* restrict owner = owner_p;
  size_t* restrict local = local_p;
  ptrdiff_t i;
#pragma omp parallel for
  for (i = 0; i < (ptrdiff_t)count; ++i) {
    int64_t v = vertex[i];
    owner[i] = VERTEX_OWNER(v);
    local[i] = VERTEX_LOCAL(v);
  }
}

/* Inverse of the split above: rebuild a global id from (rank, local). */
int64_t vertex_to_global_for_pred(int v_rank, size_t v_local) {
  return VERTEX_TO_GLOBAL(v_rank, v_local);
}

size_t get_nlocalverts_for_pred(void) {
  return g.nlocalverts;
}
ten_tusscher_2004_epi_S3_1.c
//Original Ten Tusscher #include <assert.h> #include <stdlib.h> #include "ten_tusscher_2004_epi_S3_1.h" GET_CELL_MODEL_DATA(init_cell_model_data) { assert(cell_model); if(get_initial_v) cell_model->initial_v = INITIAL_V; if(get_neq) cell_model->number_of_ode_equations = NEQ; } //TODO: this should be called only once for the whole mesh, like in the GPU code SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.6786130423415,0.00126001338762619,0.782408157036293,0.782291847999983,0.000172030269485783,0.486238827509178,0.00291727730193292,0.999998383985951,1.89824548377254e-08,1.86344423509529e-05,0.999775323244343,1.00730731658865,0.999997712256941,4.01726135419286e-05,0.574990955233012,10.0471666878941,139.498531828078}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) { uint32_t sv_id; int i; #pragma omp parallel for private(sv_id) for (i = 0; i < num_cells_to_solve; i++) { if(cells_to_solve) sv_id = cells_to_solve[i]; else sv_id = i; for (int j = 0; j < num_steps; ++j) { solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]); } } } void solve_model_ode_cpu(real dt, real *sv, real stim_current) { assert(sv); real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = 
sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; ///#ifdef EPI real Gks=0.245; ///#endif ///#ifdef ENDO /// real Gks=0.245; ///#endif ///#ifdef MCELL /// real Gks=0.062; ///#endif //Parameters for Ik1 real GK1=5.405; //Parameters for Ito //#ifdef EPI real Gto=0.294; //#endif // #ifdef ENDO // real Gto=0.073; //#endif //#ifdef MCELL // real Gto=0.294; ///#endif //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real parameters []={14.2009764772355,0.000315352885461227,0.000118695429886935,0.000239737901440067,0.231421905214242,0.144186320197361,0.137295429829565,4.48864047496363,0.0113697727065106,1.62322439191857,1100,0.000603973891374754,0.206433638678485,0.0181267509566151,0.00125480641274188,3.83309725808045e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; 
knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); 
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; ///Ileak=0.00008f*(CaSR-Cai); Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) 
{ AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; #ifdef EPI R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif #ifdef ENDO R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+28)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.; #endif #ifdef MCELL R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 
0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
serialized.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s // REQUIRES: ompt // UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8 #define TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN #include "callback.h" #include <omp.h> #include <math.h> int main() { omp_set_nested(0); print_frame(0); #pragma omp parallel num_threads(2) { print_frame_from_outlined_fn(1); print_ids(0); print_ids(1); print_frame(0); #pragma omp master { print_ids(0); void *creator_frame = get_frame_address(0); int t = (int)sin(0.1); #pragma omp task if (t) { void *task_frame = get_frame_address(0); if (creator_frame == task_frame) { // Assume this code was inlined which the compiler is allowed to do. print_frame(0); } else { // The exit frame must be our parent! print_frame_from_outlined_fn(1); } print_ids(0); print_ids(1); print_ids(2); } print_fuzzy_address(1); print_ids(0); } print_ids(0); } // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]] // make sure initial data pointers are null // CHECK-NOT: 0: new_task_data initially not null // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_create // CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[NULL]] // CHECK-SAME: parent_task_frame.reenter=[[NULL]] // CHECK-SAME: new_task_id={{[0-9]+}}, codeptr_ra=[[NULL]] // CHECK-SAME: task_type=ompt_task_initial=1, has_dependences=no // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0) // CHECK-SAME: =[[MAIN_REENTER:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin // CHECK-SAME: parent_task_id=[[PARENT_TASK_ID:[0-9]+]] // CHECK-SAME: parent_task_frame.exit=[[NULL]] // CHECK-SAME: parent_task_frame.reenter=0x{{[0-f]+}} // CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=2 // CHECK-SAME: codeptr_ra=0x{{[0-f]+}}, invoker={{[0-9]+}} // nested parallel masters // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin // CHECK-SAME: parallel_id=[[PARALLEL_ID]] // 
CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address // CHECK-SAME: =[[EXIT:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: task level 0 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: exit_frame=[[EXIT]], reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: task level 1 // CHECK-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]] // CHECK-SAME: task_id=[[PARENT_TASK_ID]], // CHECK-SAME: exit_frame=[[NULL]], reenter_frame=0x{{[0-f]+}} // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create // CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: parent_task_frame.exit=[[EXIT]] // CHECK-SAME: parent_task_frame.reenter=0x{{[0-f]+}} // CHECK-SAME: new_task_id=[[TASK_ID:[0-9]+]] // CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}} // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule: // CHECK-SAME: first_task_id=[[IMPLICIT_TASK_ID]], second_task_id=[[TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address // CHECK-SAME: =[[TASK_EXIT:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: task level 0 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]] // CHECK-SAME: exit_frame=[[TASK_EXIT]], reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: task level 1 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: exit_frame=[[EXIT]], reenter_frame=0x{{[0-f]+}} // CHECK: {{^}}[[MASTER_ID]]: task level 2 // CHECK-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]] // CHECK-SAME: task_id=[[PARENT_TASK_ID]] // CHECK-SAME: exit_frame=[[NULL]], reenter_frame=0x{{[0-f]+}} // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule // CHECK-SAME: first_task_id=[[TASK_ID]], second_task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_end: task_id=[[TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]] // CHECK: {{^}}[[MASTER_ID]]: 
task level 0 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: exit_frame=[[EXIT]], reenter_frame=[[NULL]] // implicit barrier parallel // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: task level 0 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end // parallel_id is 0 because the region ended in the barrier! // CHECK-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end // CHECK-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin // CHECK-SAME: parallel_id=[[PARALLEL_ID]] // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address // CHECK-SAME: =[[EXIT:0x[0-f]+]] // CHECK: {{^}}[[THREAD_ID]]: task level 0 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: exit_frame=[[EXIT]], reenter_frame=[[NULL]] // CHECK: {{^}}[[THREAD_ID]]: task level 1 // CHECK-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]] // CHECK-SAME: task_id=[[PARENT_TASK_ID]] // CHECK-SAME: exit_frame=[[NULL]], reenter_frame=0x{{[0-f]+}} // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address(0)={{0x[0-f]+}} // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[THREAD_ID]]: task level 0 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[NULL]] // parallel_id is 0 because the region ended in the barrier! 
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end // CHECK-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // CHECK-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]] return 0; }
tsp_hh04.c
/* Description: This program executes my implementation of the "Heinritz Hsiao" algorithm to solve the "Travelling Salesman Problem" Next city in path is either the closest or second closest one, depending on the value of <PICK_CLOSEST_CITY_POSSIBILITY> Author: Georgios Evangelou (1046900) Year: 5 Parallel Programming in Machine Learning Problems Electrical and Computer Engineering Department, University of Patras System Specifications: CPU: AMD Ryzen 2600 (6 cores/12 threads, @3.8 GHz, 6786.23 bogomips) GPU: Nvidia GTX 1050 (dual-fan, overclocked) RAM: 8GB (dual-channel, @2666 MHz) Version Notes: Compiles/Runs/Debugs with: gcc tsp_hh04.c -o tsp_hh04 -lm -O3 -pg -fopenmp && time ./tsp_hh04 && gprof ./tsp_hh04 Executes the algorithm for 10.000 cities, spanning in an area of 1.000x1.000 km and produces correct results Inherits all settings of the previous version unless stated otherwise Uses all available threads and each one finds a different path. The shortest one wins and its distance is displayed The use of rand() by every thread is the main reason that there is a decrease in execution time duration Function IsInPath_3() consumes the vast majority of the execution time Results when: PICK_CLOSEST_CITY_POSSIBILITY = 1.00 ===> Minimum total path distance: 89517.46 PICK_CLOSEST_CITY_POSSIBILITY = 0.95 ===> Minimum total path distance: PICK_CLOSEST_CITY_POSSIBILITY = 0.90 ===> Minimum total path distance: PICK_CLOSEST_CITY_POSSIBILITY = 0.85 ===> Minimum total path distance: PICK_CLOSEST_CITY_POSSIBILITY = 0.80 ===> Minimum total path distance: PICK_CLOSEST_CITY_POSSIBILITY = 0.75 ===> Minimum total path distance: Needs: ~ 02 minutes 00 seconds to calculate an optimal path using 12 threads and all optimizations listed below */ // **************************************************************************************************************** #pragma GCC optimize("O3","unroll-loops","omit-frame-pointer","inline") //Apply O3 and extra optimizations #pragma GCC 
option("arch=native","tune=native","no-zero-upper") //Adapt to the current system #pragma GCC target("avx") //Enable AVX // **************************************************************************************************************** #include "stdio.h" #include "stdlib.h" #include "math.h" #include "omp.h" // **************************************************************************************************************** #define N 10000 #define Nx 1000 #define Ny 1000 #define nonExist -999999 #define PICK_CLOSEST_CITY_POSSIBILITY 0.8 #define THREADS 12 // **************************************************************************************************************** float CitiesX[N]; float CitiesY[N]; int ThreadsPath[THREADS][N+1]; double CalculatedDistances[N][N]; // **************************************************************************************************************** // Initializes the cities' positions // **************************************************************************************************************** void SetCities() { printf("Now initializing the positions of the cities...\n"); for (int i=0; i<N; i++) { CitiesX[i] = Nx * (float) rand() / RAND_MAX; CitiesY[i] = Ny * (float) rand() / RAND_MAX; } } // **************************************************************************************************************** // Checks if a city is already in the path (until path[currentPathLength]) // **************************************************************************************************************** int IsInPath_3(int city, int currentPathLength, int Path[]) { for (int i=0; i<currentPathLength; i++) if (Path[i] == city) return 1; return 0; } // **************************************************************************************************************** // Prints the cities' positions // **************************************************************************************************************** void PrintCities() { printf("> The 
cities are:\n"); for (int i=0; i<N; i++) { printf(">> City: %6d X:%5.2f Y:%5.2f\n", i, CitiesX[i], CitiesY[i] ); } printf("\n"); } // **************************************************************************************************************** // Prints the travelling path // **************************************************************************************************************** void PrintPath_2(int Path[]) { printf("> The path is:\n"); for (int i=0; i<N+1; i++) { printf(">> %d ", Path[i]); } printf("\n"); } // **************************************************************************************************************** // Visually maps the cities' positions // **************************************************************************************************************** void MapCities() { int Map[Ny+1][Nx+1]; printf("Now creating a visual map of the cities...\n"); for (int i=0; i<Nx+1; i++) for (int j=0; j<Ny+1; j++) Map[j][i] = (float) nonExist; //printf("Quantized coordinates are:\n"); for (int c=0; c<N; c++) { int x = (int) CitiesX[c] ; int y = (int) CitiesY[c] ; //printf(" City:%d y=%d and x=%d\n",c,y,x); if (Map[y][x] == nonExist) Map[y][x] = c; else Map[y][x] = -1; } printf("This is the cities' map:\n"); printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"); for (int y=0; y<Ny+1; y++){ for (int x=0; x<Nx+1; x++) printf("%8d ", Map[y][x]); printf("\n"); } printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"); printf("\n"); } // **************************************************************************************************************** // Finds Euclidean distance between two cities // **************************************************************************************************************** double Distance(int A, int B) { return (double) sqrt( (CitiesX[A]-CitiesX[B])*(CitiesX[A]-CitiesX[B]) + 
(CitiesY[A]-CitiesY[B])*(CitiesY[A]-CitiesY[B]) ); } // **************************************************************************************************************** // Finds Eucleidian distance in a given path // **************************************************************************************************************** double PathDistance_2(int Path[]) { double totDist = 0.0; for (int i=0; i<N; i++) { totDist += Distance(Path[i], Path[i+1]); } totDist += Distance(Path[N], Path[0]); return totDist; } // **************************************************************************************************************** // Finds all Eucleidian distances between all pairs of cities // **************************************************************************************************************** void CalculateAllDistances() { printf("Now calculating distances between all pairs of cities...\n"); for (int i=0; i<N; i++) { printf("\r> Progress: %.2f%%", 100*(i+1)/((float)N)); for (int j=i+1; j<N; j++) { double temp = Distance(i, j); CalculatedDistances[i][j] = temp; CalculatedDistances[j][i] = temp; } } printf(" ===> Completed.\n"); } // **************************************************************************************************************** // Finds the travelling path by visiting the closest or second closest non-visited city each time // **************************************************************************************************************** double FindShortestStepPath_2() { #pragma omp master { printf("Now finding the shortest / second shortest step path...\n"); printf("> Threads running independently in parallel: %d\n", omp_get_num_threads()); } double totDist = 0.0; int visited_cities = 1, current_city = 0, thread = omp_get_thread_num(); int foo = thread*omp_get_wtime()*523; ThreadsPath[thread][0] = 0; ThreadsPath[thread][N] = 0; do { #pragma omp master printf("\r> Progress: %.2f%%", 100*(visited_cities)/((float)N)); double dist = 0, min_dist_1 = 
INFINITY, min_dist_2 = INFINITY; int closest_city_1 = -1, closest_city_2 = -1; for (int i=0; i<N; i++) { if (IsInPath_3(i, visited_cities, ThreadsPath[thread])) continue; //If we are trying to access current city or a visited one, go to next dist = CalculatedDistances[current_city][i]; if (min_dist_1 > dist) { min_dist_2 = min_dist_1; closest_city_2 = closest_city_1; min_dist_1 = dist; closest_city_1 = i; } else if (min_dist_2 > dist) { min_dist_2 = dist; closest_city_2 = i; } } unsigned seed = 11*visited_cities + 83*thread + 11*omp_get_wtime() + (foo++); float random_number = ((float)rand_r(&seed)) / ((float)RAND_MAX) ; int city_pick = (random_number<PICK_CLOSEST_CITY_POSSIBILITY) ? 1 : 2; int next_city = (city_pick==1) ? closest_city_1 : closest_city_2; ThreadsPath[thread][visited_cities++] = next_city; current_city = next_city; totDist += (city_pick==1) ? min_dist_1 : min_dist_2;; } while (visited_cities<N); totDist += CalculatedDistances[ThreadsPath[thread][N-1]][0]; #pragma omp barrier #pragma omp single printf("\r> Progress: 100.00%% ===> Completed.\n"); #pragma omp barrier printf(">> I am thread #(%2d) and my total path distance is: %lf.02\n", thread, totDist); return totDist; } // **************************************************************************************************************** // The main program // **************************************************************************************************************** int main( int argc, const char* argv[] ) { printf("------------------------------------------------------------------------------\n"); printf("This program searches for the optimal traveling distance between %d cities,\n", N); printf("spanning in an area of X=(0,%d) and Y=(0,%d)\n", Nx, Ny); printf("------------------------------------------------------------------------------\n"); srand(1046900); SetCities(); CalculateAllDistances(); double totDistEstimation = INFINITY; #pragma omp parallel reduction(min:totDistEstimation) 
num_threads(THREADS) { totDistEstimation = FindShortestStepPath_2(); } printf("\n"); printf("Minimum total path distance found is: %.2lf\n", totDistEstimation); return 0 ; }
vla_crash.c
// RUN: %clang_cc1 -verify -triple powerpc64le-unknown-linux-gnu -fopenmp -x c -emit-llvm %s -o - | FileCheck %s // expected-no-diagnostics int a; // CHECK-LABEL: foo void foo() { int(*b)[a]; int *(**c)[a]; // CHECK: [[B:%.+]] = alloca i32*, // CHECK: [[C:%.+]] = alloca i32***, // CHECK: @__kmpc_global_thread_num // CHECK: call void @__kmpc_serialized_parallel // CHECK: call void [[OUTLINED:@[^(]+]](i32* %{{[^,]+}}, i32* %{{[^,]+}}, i64 %{{[^,]+}}, i32** [[B]], i64 %{{[^,]+}}, i32**** [[C]]) // CHECK: call void @__kmpc_end_serialized_parallel // CHECK: ret void #pragma omp parallel if (0) b[0][0] = c[0][a][0][a]; } // CHECK: define internal void [[OUTLINED]](i32* {{[^,]+}}, i32* {{[^,]+}}, i64 {{[^,]+}}, i32** {{[^,]+}}, i64 {{[^,]+}}, i32**** {{[^,]+}})
GB_subassign_05e.c
//------------------------------------------------------------------------------
// GB_subassign_05e: C(:,:)<M,struct> = scalar ; no S, C empty, M structural
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// Method 05e: C(:,:)<M,struct> = scalar ; no S
// compare with Methods 21, 25, and 05d

// M:           present
// Mask_comp:   false
// Mask_struct: true
// C_replace:   false
// accum:       NULL
// A:           scalar
// S:           none

#include "GB_subassign_methods.h"

// this method allocates no workspace of its own, so there is nothing to free
#undef  GB_FREE_ALL
#define GB_FREE_ALL

GrB_Info GB_subassign_05e
(
    GrB_Matrix C,               // input:
    const GrB_Matrix M,
    const void *scalar,
    const GrB_Type atype,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    ASSERT_MATRIX_OK (C, "C for subassign method_05e", GB0) ;
    ASSERT_MATRIX_OK (M, "M for subassign method_05e", GB0) ;
    // C must be empty, and neither C nor M may have pending work
    ASSERT (GB_NNZ (C) == 0) ;
    ASSERT (!GB_PENDING (C)) ;
    ASSERT (!GB_ZOMBIES (C)) ;
    ASSERT (!GB_PENDING (M)) ;
    ASSERT (!GB_ZOMBIES (M)) ;

    const GB_Type_code ccode = C->type->code ;
    const size_t csize = C->type->size ;
    // GB_GET_SCALAR presumably typecasts *scalar (of type atype) into cwork,
    // of type C->type, used by the workers below -- see
    // GB_subassign_methods.h for its definition
    GB_GET_SCALAR ;

    int64_t mnz = GB_NNZ (M) ;

    //--------------------------------------------------------------------------
    // Method 05e: C(:,:)<M> = x ; C is empty, x is a scalar, M is structural
    //--------------------------------------------------------------------------

    // Time: Optimal:  the method must iterate over all entries in M,
    // and the time is O(nnz(M)).  This is also the size of C.

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    --------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (mnz, chunk, nthreads_max) ;

    //--------------------------------------------------------------------------
    // allocate C and create its pattern
    //--------------------------------------------------------------------------

    // clear prior content and then create a copy of the pattern of M.  Keep
    // the same type and CSR/CSC for C.  Allocate the values of C but do not
    // initialize them.

    bool C_is_csc = C->is_csc ;
    GB_PHIX_FREE (C) ;
    GB_OK (GB_dup2 (&C, M, false, C->type, Context)) ;
    C->is_csc = C_is_csc ;

    // the loop index is declared at function scope because the OpenMP pragma
    // inside GB_WORKER applies to the for loop that uses pC
    int64_t pC ;

    //--------------------------------------------------------------------------
    // define the worker for the switch factory
    //--------------------------------------------------------------------------

    // worker for built-in types: broadcast the scalar x into all of Cx
    #define GB_WORKER(ctype)                                                \
    {                                                                       \
        ctype *GB_RESTRICT Cx = C->x ;                                      \
        ctype x = (*(ctype *) cwork) ;                                      \
        GB_PRAGMA (omp parallel for num_threads(nthreads) schedule(static)) \
        for (pC = 0 ; pC < mnz ; pC++)                                      \
        {                                                                   \
            Cx [pC] = x ;                                                   \
        }                                                                   \
    }                                                                       \
    break ;

    //--------------------------------------------------------------------------
    // launch the switch factory
    //--------------------------------------------------------------------------

    // each case ends with the "break" embedded in GB_WORKER, so there is no
    // fallthrough here despite the appearance
    switch (C->type->code)
    {
        case GB_BOOL_code   : GB_WORKER (bool) ;
        case GB_INT8_code   : GB_WORKER (int8_t) ;
        case GB_INT16_code  : GB_WORKER (int16_t) ;
        case GB_INT32_code  : GB_WORKER (int32_t) ;
        case GB_INT64_code  : GB_WORKER (int64_t) ;
        case GB_UINT8_code  : GB_WORKER (uint8_t) ;
        case GB_UINT16_code : GB_WORKER (uint16_t) ;
        case GB_UINT32_code : GB_WORKER (uint32_t) ;
        case GB_UINT64_code : GB_WORKER (uint64_t) ;
        case GB_FP32_code   : GB_WORKER (float) ;
        case GB_FP64_code   : GB_WORKER (double) ;
        default:
            {
                // worker for all user-defined types: memcpy the scalar,
                // csize bytes at a time
                GB_BURBLE_N (mnz, "generic ") ;
                GB_void *GB_RESTRICT Cx = C->x ;
                #pragma omp parallel for num_threads(nthreads) schedule(static)
                for (pC = 0 ; pC < mnz ; pC++)
                {
                    memcpy (Cx +((pC)*csize), cwork, csize) ;
                }
            }
            break ;
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORK ;
    ASSERT_MATRIX_OK (C, "C output for subassign method_05e", GB0) ;
    return (GrB_SUCCESS) ;
}
grid.c
/* Copyright 2014-2015 The Regents of the University of California. * Copyright 2015-2019 Martin Uecker. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * 2011-2019 Martin Uecker <martin.uecker@med.uni-goettingen.de> * 2014 Frank Ong <frankong@berkeley.edu> */ #include <math.h> #include <complex.h> #include <assert.h> #include <string.h> #include "num/multind.h" #include "num/flpmath.h" #include "num/specfun.h" #include "misc/nested.h" #include "misc/misc.h" #include "grid.h" static double kb(double beta, double x) { if (fabs(x) >= 0.5) return 0.; return bessel_i0(beta * sqrt(1. - pow(2. * x, 2.))) / bessel_i0(beta); } static void kb_precompute(double beta, int n, float table[n + 1]) { for (int i = 0; i < n + 1; i++) table[i] = kb(beta, (double)(i) / (double)(n - 1) / 2.); } static double ftkb(double beta, double x) { double a = sqrt(pow(beta, 2.) - pow(M_PI * x, 2.)); return ((0. == a) ? 1. : (a / sinh(a))); // * bessel_i0(beta); } static double rolloff(double x, double beta, double width) { return ftkb(beta, x * width) / ftkb(beta, 0.); } // Linear interpolation static float lerp(float a, float b, float c) { return (1. 
- c) * a + c * b; } // Linear interpolation look up static float intlookup(int n, const float table[n + 1], float x) { float fpart; // fpart = modff(x * n, &ipart); // int index = ipart; int index = (int)(x * (n - 1)); fpart = x * (n - 1) - (float)index; #if 1 assert(index >= 0); assert(index <= n); assert(fpart >= 0.); assert(fpart <= 1.); #endif float l = lerp(table[index], table[index + 1], fpart); #if 1 assert(l <= 1.); assert(0 >= 0.); #endif return l; } enum { kb_size = 100 }; static float kb_table[kb_size + 1]; static float kb_beta = -1.; void gridH(const struct grid_conf_s* conf, const complex float* traj, const long ksp_dims[4], complex float* dst, const long grid_dims[4], const complex float* grid) { long C = ksp_dims[3]; // precompute kaiser bessel table #pragma omp critical if (-1 == kb_beta) { kb_precompute(conf->beta, kb_size, kb_table); kb_beta = conf->beta; } assert(fabs(kb_beta - conf->beta) < 1.E-6); assert(1 == ksp_dims[0]); long samples = ksp_dims[1] * ksp_dims[2]; #pragma omp parallel for for(int i = 0; i < samples; i++) { float pos[3]; pos[0] = conf->os * (creal(traj[i * 3 + 0])); pos[1] = conf->os * (creal(traj[i * 3 + 1])); pos[2] = conf->os * (creal(traj[i * 3 + 2])); pos[0] += (grid_dims[0] > 1) ? ((float)grid_dims[0] / 2.) : 0.; pos[1] += (grid_dims[1] > 1) ? ((float)grid_dims[1] / 2.) : 0.; pos[2] += (grid_dims[2] > 1) ? ((float)grid_dims[2] / 2.) 
: 0.; complex float val[C]; for (int j = 0; j < C; j++) val[j] = 0.0; grid_pointH(C, 3, grid_dims, pos, val, grid, conf->periodic, conf->width, kb_size, kb_table); for (int j = 0; j < C; j++) dst[j * samples + i] += val[j]; } } void grid(const struct grid_conf_s* conf, const complex float* traj, const long grid_dims[4], complex float* grid, const long ksp_dims[4], const complex float* src) { long C = ksp_dims[3]; // precompute kaiser bessel table #pragma omp critical if (-1 == kb_beta) { kb_precompute(conf->beta, kb_size, kb_table); kb_beta = conf->beta; } assert(fabs(kb_beta - conf->beta) < 1.E-6); assert(1 == ksp_dims[0]); long samples = ksp_dims[1] * ksp_dims[2]; // grid #pragma omp parallel for for(int i = 0; i < samples; i++) { float pos[3]; pos[0] = conf->os * (creal(traj[i * 3 + 0])); pos[1] = conf->os * (creal(traj[i * 3 + 1])); pos[2] = conf->os * (creal(traj[i * 3 + 2])); pos[0] += (grid_dims[0] > 1) ? ((float) grid_dims[0] / 2.) : 0.; pos[1] += (grid_dims[1] > 1) ? ((float) grid_dims[1] / 2.) : 0.; pos[2] += (grid_dims[2] > 1) ? ((float) grid_dims[2] / 2.) 
: 0.; complex float val[C]; for (int j = 0; j < C; j++) val[j] = src[j * samples + i]; grid_point(C, 3, grid_dims, pos, grid, val, conf->periodic, conf->width, kb_size, kb_table); } } static void grid2_dims(unsigned int D, const long trj_dims[D], const long ksp_dims[D], const long grid_dims[D]) { assert(D >= 4); assert(md_check_compat(D - 3, ~0, grid_dims + 3, ksp_dims + 3)); // assert(md_check_compat(D - 3, ~(MD_BIT(0) | MD_BIT(1)), trj_dims + 3, ksp_dims + 3)); assert(md_check_bounds(D - 3, ~0, trj_dims + 3, ksp_dims + 3)); assert(3 == trj_dims[0]); assert(1 == trj_dims[3]); assert(1 == ksp_dims[0]); } void grid2(const struct grid_conf_s* conf, unsigned int D, const long trj_dims[D], const complex float* traj, const long grid_dims[D], complex float* dst, const long ksp_dims[D], const complex float* src) { grid2_dims(D, trj_dims, ksp_dims, grid_dims); long ksp_strs[D]; md_calc_strides(D, ksp_strs, ksp_dims, CFL_SIZE); long trj_strs[D]; md_calc_strides(D, trj_strs, trj_dims, CFL_SIZE); long grid_strs[D]; md_calc_strides(D, grid_strs, grid_dims, CFL_SIZE); long pos[D]; for (unsigned int i = 0; i < D; i++) pos[i] = 0; do { grid(conf, &MD_ACCESS(D, trj_strs, pos, traj), grid_dims, &MD_ACCESS(D, grid_strs, pos, dst), ksp_dims, &MD_ACCESS(D, ksp_strs, pos, src)); } while(md_next(D, ksp_dims, (~0 ^ 15), pos)); } void grid2H(const struct grid_conf_s* conf, unsigned int D, const long trj_dims[D], const complex float* traj, const long ksp_dims[D], complex float* dst, const long grid_dims[D], const complex float* src) { grid2_dims(D, trj_dims, ksp_dims, grid_dims); long ksp_strs[D]; md_calc_strides(D, ksp_strs, ksp_dims, CFL_SIZE); long trj_strs[D]; md_calc_strides(D, trj_strs, trj_dims, CFL_SIZE); long grid_strs[D]; md_calc_strides(D, grid_strs, grid_dims, CFL_SIZE); long pos[D]; for (unsigned int i = 0; i < D; i++) pos[i] = 0; do { gridH(conf, &MD_ACCESS(D, trj_strs, pos, traj), ksp_dims, &MD_ACCESS(D, ksp_strs, pos, dst), grid_dims, &MD_ACCESS(D, grid_strs, pos, src)); } 
while(md_next(D, ksp_dims, (~0 ^ 15), pos)); } typedef void CLOSURE_TYPE(grid_update_t)(long ind, float d); #ifndef __clang__ #define VLA(x) x #else // blocks extension does not play well even with arguments which // just look like variably-modified types #define VLA(x) #endif static void grid_point_gen(int N, const long dims[VLA(N)], const float pos[VLA(N)], bool periodic, float width, int kb_size, const float kb_table[VLA(kb_size + 1)], grid_update_t update) { #ifndef __clang__ int sti[N]; int eni[N]; int off[N]; #else // blocks extension does not play well with variably-modified types int* sti = alloca(sizeof(int[N])); int* eni = alloca(sizeof(int[N])); int* off = alloca(sizeof(int[N])); #endif for (int j = 0; j < N; j++) { sti[j] = (int)ceil(pos[j] - width); eni[j] = (int)floor(pos[j] + width); off[j] = 0; if (sti[j] > eni[j]) return; if (!periodic) { sti[j] = MAX(sti[j], 0); eni[j] = MIN(eni[j], dims[j] - 1); } else { while (sti[j] + off[j] < 0) off[j] += dims[j]; } if (1 == dims[j]) { assert(0. == pos[j]); // ==0. 
fails nondeterministically for test_nufft_forward bbdec08cb sti[j] = 0; eni[j] = 0; } } __block NESTED(void, grid_point_r, (int N, long ind, float d)) // __block for recursion { if (0 == N) { NESTED_CALL(update, (ind, d)); } else { N--; for (int w = sti[N]; w <= eni[N]; w++) { float frac = fabs(((float)w - pos[N])); float d2 = d * intlookup(kb_size, kb_table, frac / width); long ind2 = (ind * dims[N] + ((w + off[N]) % dims[N])); grid_point_r(N, ind2, d2); } } }; grid_point_r(N, 0, 1.); } void grid_point(unsigned int ch, int N, const long dims[VLA(N)], const float pos[VLA(N)], complex float* dst, const complex float val[VLA(ch)], bool periodic, float width, int kb_size, const float kb_table[kb_size + 1]) { NESTED(void, update, (long ind, float d)) { for (unsigned int c = 0; c < ch; c++) { // we are allowed to update real and imaginary part independently which works atomically #pragma omp atomic __real(dst[ind + c * dims[0] * dims[1] * dims[2]]) += __real(val[c]) * d; #pragma omp atomic __imag(dst[ind + c * dims[0] * dims[1] * dims[2]]) += __imag(val[c]) * d; } }; grid_point_gen(N, dims, pos, periodic, width, kb_size, kb_table, update); } void grid_pointH(unsigned int ch, int N, const long dims[VLA(N)], const float pos[VLA(N)], complex float val[VLA(ch)], const complex float* src, bool periodic, float width, int kb_size, const float kb_table[kb_size + 1]) { NESTED(void, update, (long ind, float d)) { for (unsigned int c = 0; c < ch; c++) { // we are allowed to update real and imaginary part independently which works atomically #pragma omp atomic __real(val[c]) += __real(src[ind + c * dims[0] * dims[1] * dims[2]]) * d; #pragma omp atomic __imag(val[c]) += __imag(src[ind + c * dims[0] * dims[1] * dims[2]]) * d; } }; grid_point_gen(N, dims, pos, periodic, width, kb_size, kb_table, update); } double calc_beta(float os, float width) { return M_PI * sqrt(pow((width * 2. / os) * (os - 0.5), 2.) - 0.8); } static float pos(int d, int i) { return (1 == d) ? 0. 
: (((float)i - (float)d / 2.) / (float)d); } void rolloff_correction(float os, float width, float beta, const long dimensions[3], complex float* dst) { UNUSED(os); #pragma omp parallel for collapse(3) for (int z = 0; z < dimensions[2]; z++) for (int y = 0; y < dimensions[1]; y++) for (int x = 0; x < dimensions[0]; x++) dst[x + dimensions[0] * (y + z * dimensions[1])] = rolloff(pos(dimensions[0], x), beta, width) * rolloff(pos(dimensions[1], y), beta, width) * rolloff(pos(dimensions[2], z), beta, width); }
nontemporal-1.c
/* { dg-do compile } */ /* { dg-additional-options "-O2" } */ #define N 1024 int a[N], b[N], c[N], d[N]; void foo (void) { int i; #pragma omp simd nontemporal (a, b) for (i = 0; i < N; ++i) a[i] = b[i] + c[i]; #pragma omp simd nontemporal (d) for (i = 0; i < N; ++i) d[i] = 2 * c[i]; }
order-4.c
/* Verify that referencing a threadprivate variable inside a region with the
   'order(concurrent)' clause is diagnosed, for simd, for-simd, and for.  */

int t;
#pragma omp threadprivate(t)

void
f1 (void)
{
  int i;
  #pragma omp simd order(concurrent)	/* { dg-message "note: enclosing region" } */
  for (i = 0; i < 64; i++)
    t++;	/* { dg-error "threadprivate variable 't' used in a region with 'order\\(concurrent\\)' clause" } */
}

void
f2 (void)
{
  int i;
  #pragma omp for simd order(concurrent)	/* { dg-message "note: enclosing region" } */
  for (i = 0; i < 64; i++)	/* { dg-message "note: enclosing region" "" { target c++ } } */
    t++;	/* { dg-error "threadprivate variable 't' used in a region with 'order\\(concurrent\\)' clause" } */
}

void
f3 (void)
{
  int i;
  #pragma omp for order(concurrent)	/* { dg-message "note: enclosing region" } */
  for (i = 0; i < 64; i++)
    t++;	/* { dg-error "threadprivate variable 't' used in a region with 'order\\(concurrent\\)' clause" } */
}
image-view.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % IIIII M M AAA GGGG EEEEE % % I MM MM A A G E % % I M M M AAAAA G GG EEE % % I M M A A G G E % % IIIII M M A A GGGG EEEEE % % % % V V IIIII EEEEE W W % % V V I E W W % % V V I EEE W W W % % V V I E WW WW % % V IIIII EEEEE W W % % % % % % MagickCore Image View Methods % % % % Software Design % % Cristy % % March 2003 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/MagickCore.h" #include "MagickCore/exception-private.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor-private.h" #include "MagickCore/thread-private.h" /* Typedef declarations. */ struct _ImageView { char *description; RectangleInfo extent; Image *image; CacheView *view; ExceptionInfo *exception; MagickBooleanType debug; size_t signature; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageView() makes a copy of the specified image view. 
%
%  The format of the CloneImageView method is:
%
%      ImageView *CloneImageView(const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport ImageView *CloneImageView(const ImageView *image_view)
{
  ImageView
    *clone_view;

  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  clone_view=(ImageView *) AcquireCriticalMemory(sizeof(*clone_view));
  (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view));
  clone_view->description=ConstantString(image_view->description);
  clone_view->extent=image_view->extent;
  /*
    FIX: the clone must reference the same image as the original; without
    this, GetImageViewImage() returns NULL for a clone and the view
    iterators dereference a NULL source->image.  The image is shared (not
    copied), consistent with the cloned cache view below.
  */
  clone_view->image=image_view->image;
  clone_view->view=CloneCacheView(image_view->view);
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,image_view->exception);
  clone_view->debug=image_view->debug;
  clone_view->signature=MagickCoreSignature;
  return(clone_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e V i e w                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImageView() deallocates memory associated with a image view.
%
%  The format of the DestroyImageView method is:
%
%      ImageView *DestroyImageView(ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport ImageView *DestroyImageView(ImageView *image_view)
{
  /*
    Validate the handle before tearing it down.
  */
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  if (image_view->description != (char *) NULL)
    image_view->description=DestroyString(image_view->description);
  image_view->view=DestroyCacheView(image_view->view);
  image_view->exception=DestroyExceptionInfo(image_view->exception);
  /*
    Poison the signature so stale handles trip the asserts above, then
    release the structure itself; RelinquishMagickMemory() yields NULL,
    which is returned to the caller.
  */
  image_view->signature=(~MagickCoreSignature);
  return((ImageView *) RelinquishMagickMemory(image_view));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D u p l e x T r a n s f e r I m a g e V i e w I t e r a t o r             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DuplexTransferImageViewIterator() iterates over three image views in
%  parallel and calls your transfer method for each scanline of the view.  The
%  source and duplex pixel extent is not confined to the image canvas-- that is
%  you can include negative offsets or widths or heights that exceed the image
%  dimension.  However, the destination image view is confined to the image
%  canvas-- that is no negative offsets or widths or heights that exceed the
%  image dimension are permitted.
%
%  The callback signature is:
%
%      MagickBooleanType DuplexTransferImageViewMethod(const ImageView *source,
%        const ImageView *duplex,ImageView *destination,const ssize_t y,
%        const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the DuplexTransferImageViewIterator method is:
%
%      MagickBooleanType DuplexTransferImageViewIterator(ImageView *source,
%        ImageView *duplex,ImageView *destination,
%        DuplexTransferImageViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source image view.
%
%    o duplex: the duplex image view.
%
%    o destination: the destination image view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
%
*/
MagickExport MagickBooleanType DuplexTransferImageViewIterator(
  ImageView *source,ImageView *duplex,ImageView *destination,
  DuplexTransferImageViewMethod transfer,void *context)
{
  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  if (transfer == (DuplexTransferImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  destination_image=destination->image;
  /*
    The callback writes destination pixels, so it must be DirectClass.
  */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    NOTE(review): mixes extent.height (a bound, as the loop below shows)
    with extent.y (an offset); presumably assumes extent.y <= extent.height
    -- confirm against the other view iterators.
  */
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict duplex_pixels,
      *magick_restrict pixels;

    register Quantum
      *magick_restrict destination_pixels;

    /*
      A failure in any thread makes the remaining iterations no-ops.
    */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
      duplex->extent.width,1,duplex->exception);
    if (duplex_pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    if (destination_pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          Serialize progress updates; the critical region covers the
          statement below, including progress++.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_DuplexTransferImageViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i e w A u t h e n t i c M e t a c o n t e n t           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewAuthenticMetacontent() returns the image view authentic
%  meta-content.
%
%  The format of the GetImageViewAuthenticPixels method is:
%
%      void *GetImageViewAuthenticMetacontent(
%        const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport void *GetImageViewAuthenticMetacontent(
  const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  return(GetCacheViewAuthenticMetacontent(image_view->view));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i e w A u t h e n t i c P i x e l s                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewAuthenticPixels() returns the image view authentic pixels.
%
%  The format of the GetImageViewAuthenticPixels method is:
%
%      Quantum *GetImageViewAuthenticPixels(const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
% */ MagickExport Quantum *GetImageViewAuthenticPixels( const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); return(GetCacheViewAuthenticPixelQueue(image_view->view)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewException() returns the severity, reason, and description of any % error that occurs when utilizing a image view. % % The format of the GetImageViewException method is: % % char *GetImageViewException(const PixelImage *image_view, % ExceptionType *severity) % % A description of each parameter follows: % % o image_view: the pixel image_view. % % o severity: the severity of the error is returned here. % */ MagickExport char *GetImageViewException(const ImageView *image_view, ExceptionType *severity) { char *description; assert(image_view != (const ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); assert(severity != (ExceptionType *) NULL); *severity=image_view->exception->severity; description=(char *) AcquireQuantumMemory(2UL*MagickPathExtent, sizeof(*description)); if (description == (char *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); *description='\0'; if (image_view->exception->reason != (char *) NULL) (void) CopyMagickString(description,GetLocaleExceptionMessage( image_view->exception->severity,image_view->exception->reason), MagickPathExtent); if (image_view->exception->description != (char *) NULL) { (void) ConcatenateMagickString(description," (",MagickPathExtent); (void) ConcatenateMagickString(description,GetLocaleExceptionMessage( image_view->exception->severity,image_view->exception->description), MagickPathExtent); (void) ConcatenateMagickString(description,")",MagickPathExtent); } return(description); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewExtent() returns the image view extent. % % The format of the GetImageViewExtent method is: % % RectangleInfo GetImageViewExtent(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. % */ MagickExport RectangleInfo GetImageViewExtent(const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); return(image_view->extent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewImage() returns the image associated with the image view. % % The format of the GetImageViewImage method is: % % MagickCore *GetImageViewImage(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. % */ MagickExport Image *GetImageViewImage(const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); return(image_view->image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewIterator() iterates over the image view in parallel and calls % your get method for each scanline of the view. The pixel extent is % not confined to the image canvas-- that is you can include negative offsets % or widths or heights that exceed the image dimension. Any updates to % the pixels in your callback are ignored. 
% % The callback signature is: % % MagickBooleanType GetImageViewMethod(const ImageView *source, % const ssize_t y,const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback get method that must be % executed by a single thread at a time. % % The format of the GetImageViewIterator method is: % % MagickBooleanType GetImageViewIterator(ImageView *source, % GetImageViewMethod get,void *context) % % A description of each parameter follows: % % o source: the source image view. % % o get: the get callback method. % % o context: the user defined context. % */ MagickExport MagickBooleanType GetImageViewIterator(ImageView *source, GetImageViewMethod get,void *context) { Image *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (ImageView *) NULL); assert(source->signature == MagickCoreSignature); if (get == (GetImageViewMethod) NULL) return(MagickFalse); source_image=source->image; status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=source->extent.height-source->extent.y; #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(source_image,source_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); register const Quantum *pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const Quantum *) NULL) { status=MagickFalse; continue; } if (get(source,y,id,context) == MagickFalse) status=MagickFalse; if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetImageViewIterator) #endif 
proceed=SetImageProgress(source_image,source->description,progress++, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w V i r t u a l M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewVirtualMetacontent() returns the image view virtual % meta-content. % % The format of the GetImageViewVirtualMetacontent method is: % % const void *GetImageViewVirtualMetacontent( % const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. % */ MagickExport const void *GetImageViewVirtualMetacontent( const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); return(GetCacheViewVirtualMetacontent(image_view->view)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w V i r t u a l P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewVirtualPixels() returns the image view virtual pixels. % % The format of the GetImageViewVirtualPixels method is: % % const Quantum *GetImageViewVirtualPixels(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. 
%
*/
MagickExport const Quantum *GetImageViewVirtualPixels(
  const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  return(GetCacheViewVirtualPixelQueue(image_view->view));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I s I m a g e V i e w                                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageView() returns MagickTrue if the parameter is verified as an image
%  view object.
%
%  The format of the IsImageView method is:
%
%      MagickBooleanType IsImageView(const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport MagickBooleanType IsImageView(const ImageView *image_view)
{
  /* A valid view is non-NULL and carries the MagickCore signature. */
  if ((image_view != (const ImageView *) NULL) &&
      (image_view->signature == MagickCoreSignature))
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   N e w I m a g e V i e w                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewImageView() returns an image view required for all other methods in the
%  Image View API.
%
%  The format of the NewImageView method is:
%
%      ImageView *NewImageView(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageView *NewImageView(Image *image,ExceptionInfo *exception)
{
  ImageView
    *image_view;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /* AcquireCriticalMemory aborts on allocation failure, so no NULL check. */
  image_view=(ImageView *) AcquireCriticalMemory(sizeof(*image_view));
  (void) ResetMagickMemory(image_view,0,sizeof(*image_view));
  image_view->description=ConstantString("ImageView");
  /* The image member must be set before the cache view is acquired on it. */
  image_view->image=image;
  image_view->view=AcquireVirtualCacheView(image_view->image,exception);
  /* Default extent covers the full image canvas. */
  image_view->extent.width=image->columns;
  image_view->extent.height=image->rows;
  image_view->extent.x=0;
  image_view->extent.y=0;
  image_view->exception=AcquireExceptionInfo();
  image_view->debug=IsEventLogging();
  image_view->signature=MagickCoreSignature;
  return(image_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   N e w I m a g e V i e w R e g i o n                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewImageViewRegion() returns an image view required for all other methods
%  in the Image View API.
%
%  The format of the NewImageViewRegion method is:
%
%      ImageView *NewImageViewRegion(Image *image,const ssize_t x,
%        const ssize_t y,const size_t width,const size_t height,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,width,height: These values define the perimeter of an extent of
%      the pixel view.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport ImageView *NewImageViewRegion(Image *image,const ssize_t x, const ssize_t y,const size_t width,const size_t height, ExceptionInfo *exception) { ImageView *image_view; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); image_view=(ImageView *) AcquireCriticalMemory(sizeof(*image_view)); (void) ResetMagickMemory(image_view,0,sizeof(*image_view)); image_view->description=ConstantString("ImageView"); image_view->view=AcquireVirtualCacheView(image_view->image,exception); image_view->image=image; image_view->extent.width=width; image_view->extent.height=height; image_view->extent.x=x; image_view->extent.y=y; image_view->exception=AcquireExceptionInfo(); image_view->debug=IsEventLogging(); image_view->signature=MagickCoreSignature; return(image_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e V i e w D e s c r i p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageViewDescription() associates a description with an image view. % % The format of the SetImageViewDescription method is: % % void SetImageViewDescription(ImageView *image_view, % const char *description) % % A description of each parameter follows: % % o image_view: the image view. % % o description: the image view description. % */ MagickExport void SetImageViewDescription(ImageView *image_view, const char *description) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); image_view->description=ConstantString(description); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageViewIterator() iterates over the image view in parallel and calls % your set method for each scanline of the view. 
  The pixel extent is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension.  The pixels are initially
%  undefined and any settings you make in the callback method are
%  automagically synced back to your image.
%
%  The callback signature is:
%
%      MagickBooleanType SetImageViewMethod(ImageView *destination,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback set method that must be
%  executed by a single thread at a time.
%
%  The format of the SetImageViewIterator method is:
%
%      MagickBooleanType SetImageViewIterator(ImageView *destination,
%        SetImageViewMethod set,void *context)
%
%  A description of each parameter follows:
%
%    o destination: the image view.
%
%    o set: the set callback method.
%
%    o context: the user defined context.
%
*/
MagickExport MagickBooleanType SetImageViewIterator(ImageView *destination,
  SetImageViewMethod set,void *context)
{
  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(destination != (ImageView *) NULL);
  assert(destination->signature == MagickCoreSignature);
  if (set == (SetImageViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->image;
  /* Pixels will be written, so the image must be DirectClass. */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=destination->extent.height-destination->extent.y;
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(destination_image,destination_image,height,1)
#endif
  for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register Quantum
      *magick_restrict pixels;

    /* A failure in any row aborts the remaining rows (loop bodies only). */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
      y,destination->extent.width,1,destination->exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    if (set(destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /*
      Sync the callback's changes back to the cache; note the separate `sync`
      variable so a callback failure above is not overwritten.
    */
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* Progress counter is shared; serialize updates across threads. */
        #pragma omp critical (MagickCore_SetImageViewIterator)
#endif
        proceed=SetImageProgress(destination_image,destination->description,
          progress++,destination->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   T r a n s f e r I m a g e V i e w I t e r a t o r                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransferImageViewIterator() iterates over two image views in parallel and
%  calls your transfer method for each scanline of the view.  The source pixel
%  extent is not confined to the image canvas-- that is you can include
%  negative offsets or widths or heights that exceed the image dimension.
%  However, the destination image view is confined to the image canvas-- that
%  is no negative offsets or widths or heights that exceed the image dimension
%  are permitted.
%
%  The callback signature is:
%
%      MagickBooleanType TransferImageViewMethod(const ImageView *source,
%        ImageView *destination,const ssize_t y,const int thread_id,
%        void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the TransferImageViewIterator method is:
%
%      MagickBooleanType TransferImageViewIterator(ImageView *source,
%        ImageView *destination,TransferImageViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source image view.
%
%    o destination: the destination image view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
%
*/
MagickExport MagickBooleanType TransferImageViewIterator(ImageView *source,
  ImageView *destination,TransferImageViewMethod transfer,void *context)
{
  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  /*
    NOTE(review): unlike `source`, `destination` is dereferenced below with
    no NULL/signature assert -- confirm whether callers guarantee validity.
  */
  if (transfer == (TransferImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  destination_image=destination->image;
  /* Destination pixels will be written, so it must be DirectClass. */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict pixels;

    register Quantum
      *magick_restrict destination_pixels;

    /* A failure in any row aborts the remaining rows (loop bodies only). */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    if (destination_pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    if (transfer(source,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* Separate `sync` so a callback failure above is not overwritten. */
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* Progress counter is shared; serialize updates across threads. */
        #pragma omp critical (MagickCore_TransferImageViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   U p d a t e I m a g e V i e w I t e r a t o r                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UpdateImageViewIterator() iterates over the image view in parallel and calls
%  your update method for each scanline of the view.  The pixel extent is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension are permitted.  Updates to pixels
%  in your callback are automagically synced back to the image.
%
%  The callback signature is:
%
%      MagickBooleanType UpdateImageViewMethod(ImageView *source,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback update method that must be
%  executed by a single thread at a time.
%
%  The format of the UpdateImageViewIterator method is:
%
%      MagickBooleanType UpdateImageViewIterator(ImageView *source,
%        UpdateImageViewMethod update,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source image view.
%
%    o update: the update callback method.
%
%    o context: the user defined context.
%
*/
MagickExport MagickBooleanType UpdateImageViewIterator(ImageView *source,
  UpdateImageViewMethod update,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  if (update == (UpdateImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  /* Pixels will be written, so the image must be DirectClass. */
  status=SetImageStorageClass(source_image,DirectClass,source->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register Quantum
      *magick_restrict pixels;

    /* A failure in any row aborts the remaining rows (loop bodies only). */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    if (update(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    /*
      Bug fix: the sync result must go into a separate variable.  The
      original code did `status=SyncCacheViewAuthenticPixels(...)` followed
      by `if (status == MagickFalse) status=MagickFalse;`, which (a) made the
      second statement a no-op and (b) silently erased a MagickFalse set by
      the update callback or by another iteration whenever the sync
      succeeded.  This now matches the Set/Transfer iterators above.
    */
    sync=SyncCacheViewAuthenticPixels(source->view,source->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* Progress counter is shared; serialize updates across threads. */
        #pragma omp critical (MagickCore_UpdateImageViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
DRB075-getthreadnum-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
 * Test if the semantics of omp_get_thread_num() is correctly recognized.
 * Thread with id 0 writes numThreads while other threads read it, causing
 * data races.  Data race pair: numThreads@60 vs. numThreads@64.
 *
 * NOTE: this is a DataRaceBench "orig-yes" case -- the race is intentional
 * and must NOT be "fixed"; it exists to exercise race-detection tools.
 */
#include <omp.h>
#include <stdio.h>

int main()
{
  int numThreads=0 ;
#pragma omp parallel
  {
    if ( omp_get_thread_num()==0 )
    {
      /* Thread 0 writes the shared variable... */
      numThreads = omp_get_num_threads();
    }
    else
    {
      /* ...while the other threads read it with no synchronization (race). */
      printf("numThreads=%d\n", numThreads);
    }
  }
  return 0;
}
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 32; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < 
Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
wshfl.c
/* Copyright 2018-2019. Massachusetts Institute of Technology. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: * 2018-2019 Siddharth Iyer <ssi@mit.edu> * * Tamir J, Uecker M, Chen W, Lai P, Alley MT, Vasanawala SS, Lustig M. * T2 shuffling: Sharp, multicontrast, volumetric fast spin‐echo imaging. * Magnetic resonance in medicine. 2017 Jan 1;77(1):180-95. * * B Bilgic, BA Gagoski, SF Cauley, AP Fan, JR Polimeni, PE Grant, * LL Wald, and K Setsompop, Wave-CAIPI for highly accelerated 3D * imaging. Magn Reson Med (2014) doi: 10.1002/mrm.25347 * * Iyer S, Bilgic B, Setsompop K. * Faster T2 shuffling with Wave. * Presented in the session: "Signal Encoding and Decoding" at ISMRM 2018. * https://www.ismrm.org/18/program_files/O67.htm */ #include <stdbool.h> #include <complex.h> #include <math.h> #ifdef _OPENMP #include <omp.h> #endif #include "num/multind.h" #include "num/flpmath.h" #include "num/fft.h" #include "num/init.h" #include "num/iovec.h" #include "num/ops.h" #include "num/ops_p.h" #ifdef USE_CUDA #include "num/gpuops.h" #endif #include "iter/iter.h" #include "iter/lsqr.h" #include "iter/misc.h" #include "linops/linop.h" #include "linops/fmac.h" #include "linops/someops.h" #include "linops/decompose_complex.h" #include "misc/debug.h" #include "misc/mri.h" #include "misc/utils.h" #include "misc/mmio.h" #include "misc/misc.h" #include "misc/opts.h" #include "wavelet/wavthresh.h" #include "lowrank/lrthresh.h" static const char usage_str[] = "<maps> <wave> <phi> <reorder> <table> <output>"; static const char help_str[] = "Perform a wave-shuffling reconstruction.\n\n" "Conventions:\n" " * (sx, sy, sz) - Spatial dimensions.\n" " * wx - Extended FOV in READ_DIM due to\n" " wave's voxel spreading.\n" " * (nc, md) - Number of channels and ESPIRiT's \n" " extended-SENSE model operator\n" " dimensions (or # of maps).\n" " * (tf, tk) - Turbo-factor and the rank\n" " of the temporal basis 
used in\n" " shuffling.\n" " * ntr - Number of TRs, or the number of\n" " (ky, kz) points acquired of one\n" " echo image.\n" " * n - Total number of (ky, kz) points\n" " acquired. This is equal to the\n" " product of ntr and tf.\n\n" "Descriptions:\n" " * reorder is an (n by 3) index matrix such that\n" " [ky, kz, t] = reorder(i, :) represents the\n" " (ky, kz) kspace position of the readout line\n" " acquired at echo number (t), and 0 <= ky < sy,\n" " 0 <= kz < sz, 0 <= t < tf).\n" " * table is a (wx by nc by n) matrix such that\n" " table(:, :, k) represents the kth multichannel\n" " kspace line.\n\n" "Expected dimensions:\n" " * maps - ( sx, sy, sz, nc, md, 1, 1)\n" " * wave - ( wx, sy, sz, 1, 1, 1, 1)\n" " * phi - ( 1, 1, 1, 1, 1, tf, tk)\n" " * output - ( sx, sy, sz, 1, md, 1, tk)\n" " * reorder - ( n, 3, 1, 1, 1, 1, 1)\n" " * table - ( wx, nc, n, 1, 1, 1, 1)"; /* Helper function to print out operator dimensions. */ static void print_opdims(const struct linop_s* op) { const struct iovec_s* domain = linop_domain(op); const struct iovec_s* codomain = linop_codomain(op); debug_printf(DP_INFO, "\tDomain: ["); for (long k = 0; k < domain->N; k ++) debug_printf(DP_INFO, "%6ld", domain->dims[k]); debug_printf(DP_INFO, "]\n"); debug_printf(DP_INFO, "\tCodomain: ["); for (long k = 0; k < codomain->N; k ++) debug_printf(DP_INFO, "%6ld", codomain->dims[k]); debug_printf(DP_INFO, "]\n"); } /* Construct sampling mask array from reorder tables. 
*/ static void construct_mask( long reorder_dims[DIMS], complex float* reorder, long mask_dims[DIMS], complex float* mask) { long n = reorder_dims[0]; long sy = mask_dims[1]; long sz = mask_dims[2]; long y = 0; long z = 0; long t = 0; for (int i = 0; i < n; i++) { y = lround(creal(reorder[i])); z = lround(creal(reorder[i + n])); t = lround(creal(reorder[i + 2 * n])); mask[(y + z * sy) + t * sy * sz] = 1; } } struct kern_s { INTERFACE(linop_data_t); unsigned int N; long* reorder_dims; // Dimension of the index table: ( n, 3, 1, 1, 1, 1, 1, 1) long* phi_dims; // Dimension of the temporal basis: ( 1, 1, 1, 1, 1, tf, tk, 1) long* table_dims; // Dimension of the data table: (wx, nc, n, 1, 1, 1, 1, 1) long* kernel_dims; // Dimension of the kernel: ( 1, sy, sz, 1, 1, 1, tk, tk) complex float* reorder; complex float* phi; complex float* kernel; complex float* gpu_kernel; }; static DEF_TYPEID(kern_s); /* Go to table from coefficient-kspace with memory efficiency. */ static void kern_apply(const linop_data_t* _data, complex float* dst, const complex float* src) { const struct kern_s* data = CAST_DOWN(kern_s, _data); long wx = data->table_dims[0]; long sy = data->kernel_dims[1]; long sz = data->kernel_dims[2]; long nc = data->table_dims[1]; long n = data->reorder_dims[0]; long tf = data->phi_dims[5]; long tk = data->phi_dims[6]; long input_dims[] = { [0 ... DIMS - 1] = 1 }; input_dims[0] = wx; input_dims[1] = sy; input_dims[2] = sz; input_dims[3] = nc; input_dims[6] = tk; long perm_dims[] = { [0 ... 
DIMS - 1] = 1 }; perm_dims[0] = wx; perm_dims[1] = nc; perm_dims[3] = tk; perm_dims[4] = sy; perm_dims[5] = sz; complex float* perm = md_alloc_sameplace(DIMS, perm_dims, CFL_SIZE, src); unsigned int permute_order[DIMS] = {0, 3, 5, 6, 1, 2, 4, 7}; for (unsigned int i = 8; i < DIMS; i++) permute_order[i] = i; md_permute(DIMS, permute_order, perm_dims, perm, input_dims, src, CFL_SIZE); long vec_dims[] = {wx, nc, tf, 1}; long phi_mat_dims[] = { 1, 1, tf, tk}; long phi_in_dims[] = {wx, nc, 1, tk}; long fmac_dims[] = {wx, nc, tf, tk}; long line_dims[] = {wx, nc, 1, 1}; complex float* vec = md_alloc_sameplace(4, vec_dims, CFL_SIZE, src); long vec_str[4]; md_calc_strides(4, vec_str, vec_dims, CFL_SIZE); long phi_mat_str[4]; md_calc_strides(4, phi_mat_str, phi_mat_dims, CFL_SIZE); long phi_in_str[4]; md_calc_strides(4, phi_in_str, phi_in_dims, CFL_SIZE); long fmac_str[4]; md_calc_strides(4, fmac_str, fmac_dims, CFL_SIZE); int y = -1; int z = -1; int t = -1; for (int i = 0; i < n; i ++) { y = lround(creal(data->reorder[i])); z = lround(creal(data->reorder[i + n])); t = lround(creal(data->reorder[i + 2 * n])); md_clear(4, vec_dims, vec, CFL_SIZE); md_zfmac2(4, fmac_dims, vec_str, vec, phi_in_str, (perm + ((wx * nc * tk) * (y + z * sy))), phi_mat_str, data->phi); md_copy(4, line_dims, dst + (i * wx * nc), vec + (t * wx * nc), CFL_SIZE); } md_free(perm); md_free(vec); } /* Collapse data table into the temporal basis for memory efficiency. */ static void kern_adjoint(const linop_data_t* _data, complex float* dst, const complex float* src) { struct kern_s* data = CAST_DOWN(kern_s, _data); long wx = data->table_dims[0]; long sy = data->kernel_dims[1]; long sz = data->kernel_dims[2]; long nc = data->table_dims[1]; long n = data->reorder_dims[0]; long tf = data->phi_dims[5]; long tk = data->phi_dims[6]; long perm_dims[] = { [0 ... 
DIMS - 1] = 1 }; perm_dims[0] = wx; perm_dims[1] = nc; perm_dims[3] = tk; perm_dims[4] = sy; perm_dims[5] = sz; complex float* perm = md_alloc_sameplace(DIMS, perm_dims, CFL_SIZE, dst); md_clear(DIMS, perm_dims, perm, CFL_SIZE); #ifdef _OPENMP long num_threads = omp_get_max_threads(); #else long num_threads = 1; #endif long vec_dims[] = {wx, nc, tf, 1}; long phi_mat_dims[] = { 1, 1, tf, tk}; long phi_out_dims[] = {wx, nc, 1, tk}; long fmac_dims[] = {wx, nc, tf, tk}; long line_dims[] = {wx, nc, 1, 1}; long vthrd_dims[] = {wx, nc, tf, 1, num_threads}; complex float* vec = md_alloc_sameplace(5, vthrd_dims, CFL_SIZE, dst); md_clear(5, vthrd_dims, vec, CFL_SIZE); long vec_str[4]; md_calc_strides(4, vec_str, vec_dims, CFL_SIZE); long phi_mat_str[4]; md_calc_strides(4, phi_mat_str, phi_mat_dims, CFL_SIZE); long phi_out_str[4]; md_calc_strides(4, phi_out_str, phi_out_dims, CFL_SIZE); long fmac_str[4]; md_calc_strides(4, fmac_str, fmac_dims, CFL_SIZE); long flag_dims[1] = { n }; complex float* flags = md_calloc(1, flag_dims, CFL_SIZE); #pragma omp parallel for for (int k = 0; k < n; k ++) { #ifdef _OPENMP int tid = omp_get_thread_num(); #else int tid = 0; #endif int y = lround(creal(data->reorder[k])); int z = lround(creal(data->reorder[k + n])); int t = -1; if (0 == flags[k]) { md_clear(4, vec_dims, vec + (wx * nc * tf * tid), CFL_SIZE); for (int i = k; i < n; i ++) { if ((y == lround(creal(data->reorder[i]))) && (z == lround(creal(data->reorder[i + n])))) { flags[i] = 1; t = lround(creal(data->reorder[i + 2 * n])); md_copy(4, line_dims, (vec + (wx * nc * tf * tid) + t * wx * nc), (src + i * wx * nc), CFL_SIZE); } } md_zfmacc2(4, fmac_dims, phi_out_str, perm + (y + z * sy) * (wx * nc * tk), vec_str, vec + (wx * nc * tf * tid), phi_mat_str, data->phi); } } long out_dims[] = { [0 ... 
DIMS - 1] = 1 };
	out_dims[0] = wx;
	out_dims[1] = sy;
	out_dims[2] = sz;
	out_dims[3] = nc;
	out_dims[6] = tk;

	// Move the (y, z, coil, coeff) axes back into their canonical slots.
	unsigned int permute_order[DIMS] = {0, 4, 5, 1, 6, 2, 3, 7};
	for (unsigned int i = 8; i < DIMS; i++)
		permute_order[i] = i;

	md_permute(DIMS, permute_order, out_dims, dst, perm_dims, perm, CFL_SIZE);

	md_free(vec);
	md_free(perm);
	md_free(flags);
}

/*
 * Normal operator (A^H A) of the sampling-temporal kernel.
 * Contracts the input coefficient cube with the precomputed kernel
 * (dims [1, sy, sz, 1, 1, 1, tk, tk]) over the tk input-coefficient axis,
 * producing the output in axis 7 and broadcasting over readout and coil.
 */
static void kern_normal(const linop_data_t* _data, complex float* dst, const complex float* src)
{
	const struct kern_s* data = CAST_DOWN(kern_s, _data);

	long wx = data->table_dims[0];		// readout length
	long sy = data->kernel_dims[1];		// phase-encode y
	long sz = data->kernel_dims[2];		// phase-encode z
	long nc = data->table_dims[1];		// number of coils
	long tk = data->phi_dims[6];		// number of temporal coefficients

	long input_dims[DIMS] = { [0 ... DIMS - 1] = 1 };
	input_dims[0] = wx;
	input_dims[1] = sy;
	input_dims[2] = sz;
	input_dims[3] = nc;
	input_dims[6] = tk;

	long input_str[DIMS];
	md_calc_strides(DIMS, input_str, input_dims, CFL_SIZE);

	// Output coefficients live in dim 7 so the fmac can sum over dim 6.
	long output_dims[DIMS];
	md_copy_dims(DIMS, output_dims, input_dims);
	output_dims[6] = 1;
	output_dims[7] = tk;

	long output_str[DIMS];
	md_calc_strides(DIMS, output_str, output_dims, CFL_SIZE);

	// GPU kernel was materialized with explicit readout/coil extent
	// (no broadcast), so it needs its own dims/strides.
	long gpu_kernel_dims[DIMS] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, gpu_kernel_dims, data->kernel_dims);
	gpu_kernel_dims[0] = wx;
	gpu_kernel_dims[3] = nc;

	long kernel_str[DIMS];
	md_calc_strides(DIMS, kernel_str, data->kernel_dims, CFL_SIZE);

	long gpu_kernel_str[DIMS];
	md_calc_strides(DIMS, gpu_kernel_str, gpu_kernel_dims, CFL_SIZE);

	long fmac_dims[DIMS];
	md_merge_dims(DIMS, fmac_dims, input_dims, data->kernel_dims);

	md_clear(DIMS, output_dims, dst, CFL_SIZE);
#ifdef USE_CUDA
	if(cuda_ondevice(src))
		md_zfmac2(DIMS, fmac_dims, output_str, dst, input_str, src, gpu_kernel_str, data->gpu_kernel);
	else
#endif
	md_zfmac2(DIMS, fmac_dims, output_str, dst, input_str, src, kernel_str, data->kernel);
}

/* Release all storage owned by the kernel linop data. */
static void kern_free(const linop_data_t* _data)
{
	const struct kern_s* data = CAST_DOWN(kern_s, _data);

	xfree(data->reorder_dims);
	xfree(data->phi_dims);
	xfree(data->table_dims);
	xfree(data->kernel_dims);
#ifdef USE_CUDA
	if (data->gpu_kernel != NULL)
		md_free(data->gpu_kernel);
#endif
	xfree(data);
}

/*
 * Create the sampling-temporal kernel linear operator.
 * Copies all dimension arrays into owned storage; the data pointers
 * (reorder, phi, kernel) are borrowed, not owned.  When gpu_flag is set,
 * the kernel is replicated over readout and coil and moved to the GPU
 * once here, so kern_normal can avoid broadcast strides on device.
 */
static const struct linop_s* linop_kern_create(bool gpu_flag, const long _reorder_dims[DIMS], complex float* reorder, const long _phi_dims[DIMS], complex float* phi, const long _kernel_dims[DIMS], complex float* kernel, const long _table_dims[DIMS])
{
	PTR_ALLOC(struct kern_s, data);
	SET_TYPEID(kern_s, data);

	PTR_ALLOC(long[DIMS], reorder_dims);
	PTR_ALLOC(long[DIMS], phi_dims);
	PTR_ALLOC(long[DIMS], table_dims);
	PTR_ALLOC(long[DIMS], kernel_dims);

	md_copy_dims(DIMS, *reorder_dims, _reorder_dims);
	md_copy_dims(DIMS, *phi_dims, _phi_dims);
	md_copy_dims(DIMS, *table_dims, _table_dims);
	md_copy_dims(DIMS, *kernel_dims, _kernel_dims);

	data->reorder_dims = *PTR_PASS(reorder_dims);
	data->phi_dims = *PTR_PASS(phi_dims);
	data->table_dims = *PTR_PASS(table_dims);
	data->kernel_dims = *PTR_PASS(kernel_dims);

	data->reorder = reorder;
	data->phi = phi;
	data->kernel = kernel;
	data->gpu_kernel = NULL;
#ifdef USE_CUDA
	if(gpu_flag) {
		// Materialize the broadcast kernel with full readout/coil extent.
		long repmat_kernel_dims[DIMS] = { [0 ... DIMS - 1] = 1};
		md_copy_dims(DIMS, repmat_kernel_dims, _kernel_dims);
		repmat_kernel_dims[0] = _table_dims[0];
		repmat_kernel_dims[3] = _table_dims[1];

		long kernel_strs[DIMS];
		long repmat_kernel_strs[DIMS];
		md_calc_strides(DIMS, kernel_strs, _kernel_dims, CFL_SIZE);
		md_calc_strides(DIMS, repmat_kernel_strs, repmat_kernel_dims, CFL_SIZE);

		complex float* repmat_kernel = md_calloc(DIMS, repmat_kernel_dims, CFL_SIZE);
		md_copy2(DIMS, repmat_kernel_dims, repmat_kernel_strs, repmat_kernel, kernel_strs, kernel, CFL_SIZE);

		data->gpu_kernel = md_gpu_move(DIMS, repmat_kernel_dims, repmat_kernel, CFL_SIZE);
		md_free(repmat_kernel);
	}
#else
	UNUSED(gpu_flag);
#endif

	// Domain: coefficient cube [wx, sy, sz, nc, 1, 1, tk].
	long input_dims[DIMS] = { [0 ... DIMS - 1] = 1 };
	input_dims[0] = _table_dims[0];
	input_dims[1] = _kernel_dims[1];
	input_dims[2] = _kernel_dims[2];
	input_dims[3] = _table_dims[1];
	input_dims[6] = _phi_dims[6];

	// Codomain: data table [wx, nc, #acquisitions].
	long output_dims[DIMS] = { [0 ... DIMS - 1] = 1 };
	output_dims[0] = _table_dims[0];
	output_dims[1] = _table_dims[1];
	output_dims[2] = _reorder_dims[0];

	const struct linop_s* K = linop_create(DIMS, output_dims, DIMS, input_dims, CAST_UP(PTR_PASS(data)), kern_apply, kern_adjoint, kern_normal, NULL, kern_free);
	return K;
}

/* Data for the multi-coil wrapper around a single-channel operator. */
struct multc_s {

	INTERFACE(linop_data_t);

	unsigned int nc;			// number of coils
	unsigned int md;			// number of ESPIRiT maps
	const complex float* maps;		// coil sensitivities (borrowed)
	const struct linop_s* sc_op;		// Single channel operator.
};
static DEF_TYPEID(multc_s);

/*
 * Forward: for each coil, multiply the coefficient image by that coil's
 * sensitivity map, push it through the single-channel operator, and stack
 * the per-coil tables; finally permute so coils end up in dim 1.
 */
static void multc_apply(const linop_data_t* _data, complex float* dst, const complex float* src)
{
	const struct multc_s* data = CAST_DOWN(multc_s, _data);

	// Loading single channel operator.
	const struct operator_s* fwd = data->sc_op->forward;
	const long* sc_inp_dims = linop_domain(data->sc_op)->dims;
	const long* sc_out_dims = linop_codomain(data->sc_op)->dims;

	long sx = sc_inp_dims[0];
	long sy = sc_inp_dims[1];
	long sz = sc_inp_dims[2];
	long wx = sc_out_dims[0];
	long n = sc_out_dims[2];
	long nc = data->nc;
	long md = data->md;

	long src_dims[] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, src_dims, sc_inp_dims);
	src_dims[MAPS_DIM] = md;

	long dst_dims[] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, dst_dims, sc_out_dims);
	dst_dims[1] = nc;

	long map_dims[] = { [0 ... DIMS - 1] = 1};
	map_dims[0] = sx;
	map_dims[1] = sy;
	map_dims[2] = sz;
	map_dims[3] = nc;
	map_dims[4] = md;

	long single_map_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, single_map_dims, map_dims);
	single_map_dims[COIL_DIM] = 1;

	complex float* single_map = md_alloc_sameplace(DIMS, single_map_dims, CFL_SIZE, src);
	complex float* buffer = md_alloc_sameplace(DIMS, sc_inp_dims, CFL_SIZE, src);

	// Per-coil results are written contiguously here: [wx, n, nc].
	long tbl_dims[] = { [0 ... DIMS - 1] = 1};
	tbl_dims[0] = wx;
	tbl_dims[1] = n;
	tbl_dims[2] = nc;
	complex float* tbl = md_alloc_sameplace(DIMS, tbl_dims, CFL_SIZE, src);
	md_clear(DIMS, tbl_dims, tbl, CFL_SIZE);

	long pos[] = { [0 ... DIMS - 1] = 0 };

	long zfmac_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, zfmac_dims, src_dims);

	long strides_single_map[DIMS];
	md_calc_strides(DIMS, strides_single_map, single_map_dims, CFL_SIZE);

	long strides_src[DIMS];
	md_calc_strides(DIMS, strides_src, src_dims, CFL_SIZE);

	long strides_sc_inp[DIMS];
	md_calc_strides(DIMS, strides_sc_inp, sc_inp_dims, CFL_SIZE);

	for (long k = 0; k < data->nc; k++) {

		md_clear(DIMS, single_map_dims, single_map, CFL_SIZE);
		md_clear(DIMS, sc_inp_dims, buffer, CFL_SIZE);

		pos[COIL_DIM] = k;
		md_slice(DIMS, COIL_FLAG, pos, map_dims, single_map, data->maps, CFL_SIZE);
		pos[COIL_DIM] = 0;

		// buffer = sum over maps of (src * map_k); collapses MAPS_DIM.
		md_zfmac2(DIMS, zfmac_dims, strides_sc_inp, buffer, strides_src, src, strides_single_map, single_map);

		operator_apply(fwd, DIMS, sc_out_dims, tbl + (wx * n * k), DIMS, sc_inp_dims, buffer);
	}

	md_clear(DIMS, dst_dims, dst, CFL_SIZE);
	// Swap dims 1 and 2: [wx, n, nc] -> [wx, nc, n].
	unsigned int permute_order[DIMS] = {0, 2, 1};
	for (unsigned int i = 3; i < DIMS; i++)
		permute_order[i] = i;
	md_permute(DIMS, permute_order, dst_dims, dst, tbl_dims, tbl, CFL_SIZE);

	md_free(single_map);
	md_free(buffer);
	md_free(tbl);
}

static void multc_adjoint(const linop_data_t* _data,
complex float* dst, const complex float* src)
{
	const struct multc_s* data = CAST_DOWN(multc_s, _data);

	// Loading single channel operator.
	const struct operator_s* adj = data->sc_op->adjoint;
	const long* sc_inp_dims = linop_codomain(data->sc_op)->dims;
	const long* sc_out_dims = linop_domain(data->sc_op)->dims;

	long sx = sc_out_dims[0];
	long sy = sc_out_dims[1];
	long sz = sc_out_dims[2];
	long wx = sc_inp_dims[0];
	long n = sc_inp_dims[2];
	long nc = data->nc;
	long md = data->md;

	long src_dims[] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, src_dims, sc_inp_dims);
	src_dims[1] = nc;		// coils sit in dim 1 of the data table

	long dst_dims[] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, dst_dims, sc_out_dims);
	dst_dims[MAPS_DIM] = md;

	long map_dims[] = { [0 ... DIMS - 1] = 1};
	map_dims[0] = sx;
	map_dims[1] = sy;
	map_dims[2] = sz;
	map_dims[3] = nc;
	map_dims[4] = md;

	long single_map_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, single_map_dims, map_dims);
	single_map_dims[COIL_DIM] = 1;

	complex float* single_map = md_alloc_sameplace(DIMS, single_map_dims, CFL_SIZE, src);
	complex float* buffer1 = md_alloc_sameplace(DIMS, sc_out_dims, CFL_SIZE, src);
	complex float* buffer2 = md_alloc_sameplace(DIMS, dst_dims, CFL_SIZE, src);

	// Per-coil slice of the data table: [wx, 1, n].
	long tbl_dims[] = { [0 ... DIMS - 1] = 1};
	tbl_dims[0] = wx;
	tbl_dims[2] = n;
	complex float* tbl = md_alloc_sameplace(DIMS, tbl_dims, CFL_SIZE, src);

	long pos[] = { [0 ... DIMS - 1] = 0 };

	long strides_single_map[DIMS];
	md_calc_strides(DIMS, strides_single_map, single_map_dims, CFL_SIZE);

	long strides_sc_out[DIMS];
	md_calc_strides(DIMS, strides_sc_out, sc_out_dims, CFL_SIZE);

	long strides_dst[DIMS];
	md_calc_strides(DIMS, strides_dst, dst_dims, CFL_SIZE);

	md_clear(DIMS, dst_dims, dst, CFL_SIZE);

	// Accumulate adjoint contributions coil by coil:
	// dst += conj(map_k) * A_sc^H(table slice k).
	for (long k = 0; k < data->nc; k++) {

		md_clear(DIMS, single_map_dims, single_map, CFL_SIZE);
		md_clear(DIMS, sc_out_dims, buffer1, CFL_SIZE);
		md_clear(DIMS, dst_dims, buffer2, CFL_SIZE);
		md_clear(DIMS, tbl_dims, tbl, CFL_SIZE);

		pos[1] = k;
		md_slice(DIMS, 2, pos, src_dims, tbl, src, CFL_SIZE);	// flag 2 = bit for dim 1 (coil)
		pos[1] = 0;

		operator_apply(adj, DIMS, sc_out_dims, buffer1, DIMS, tbl_dims, tbl);

		pos[COIL_DIM] = k;
		md_slice(DIMS, COIL_FLAG, pos, map_dims, single_map, data->maps, CFL_SIZE);
		pos[COIL_DIM] = 0;

		// zfmacc conjugates the map (third argument).
		md_zfmacc2(DIMS, dst_dims, strides_dst, buffer2, strides_sc_out, buffer1, strides_single_map, single_map);
		md_zadd(DIMS, dst_dims, dst, dst, buffer2);
	}

	md_free(single_map);
	md_free(buffer1);
	md_free(buffer2);
	md_free(tbl);
}

/*
 * Normal operator: for each coil, project onto its map, apply the
 * single-channel normal operator, and project back with the conjugate map.
 */
static void multc_normal(const linop_data_t* _data, complex float* dst, const complex float* src)
{
	const struct multc_s* data = CAST_DOWN(multc_s, _data);

	// Loading single channel operator.
	const struct operator_s* nrm = data->sc_op->normal;
	const long* sc_dims = linop_domain(data->sc_op)->dims;

	long sx = sc_dims[0];
	long sy = sc_dims[1];
	long sz = sc_dims[2];
	long nc = data->nc;
	long md = data->md;

	long dims[] = { [0 ... DIMS - 1] = 1};
	md_copy_dims(DIMS, dims, sc_dims);
	dims[MAPS_DIM] = md;

	long map_dims[] = { [0 ... DIMS - 1] = 1};
	map_dims[0] = sx;
	map_dims[1] = sy;
	map_dims[2] = sz;
	map_dims[3] = nc;
	map_dims[4] = md;

	long single_map_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, single_map_dims, map_dims);
	single_map_dims[COIL_DIM] = 1;

	complex float* single_map = md_alloc_sameplace(DIMS, single_map_dims, CFL_SIZE, src);
	complex float* buffer1 = md_alloc_sameplace(DIMS, sc_dims, CFL_SIZE, src);
	complex float* buffer2 = md_alloc_sameplace(DIMS, sc_dims, CFL_SIZE, src);
	complex float* buffer3 = md_alloc_sameplace(DIMS, dims, CFL_SIZE, src);

	long pos[] = { [0 ... DIMS - 1] = 0 };

	long strides_single_map[DIMS];
	md_calc_strides(DIMS, strides_single_map, single_map_dims, CFL_SIZE);

	long strides_sc[DIMS];
	md_calc_strides(DIMS, strides_sc, sc_dims, CFL_SIZE);

	long strides[DIMS];
	md_calc_strides(DIMS, strides, dims, CFL_SIZE);

	md_clear(DIMS, dims, dst, CFL_SIZE);

	for (long k = 0; k < data->nc; k++) {

		md_clear(DIMS, single_map_dims, single_map, CFL_SIZE);
		md_clear(DIMS, sc_dims, buffer1, CFL_SIZE);
		md_clear(DIMS, sc_dims, buffer2, CFL_SIZE);
		md_clear(DIMS, dims, buffer3, CFL_SIZE);

		pos[COIL_DIM] = k;
		md_slice(DIMS, COIL_FLAG, pos, map_dims, single_map, data->maps, CFL_SIZE);
		pos[COIL_DIM] = 0;

		// buffer1 = map_k * src (sums over MAPS_DIM).
		md_zfmac2(DIMS, dims, strides_sc, buffer1, strides, src, strides_single_map, single_map);

		operator_apply(nrm, DIMS, sc_dims, buffer2, DIMS, sc_dims, buffer1);

		// buffer3 = conj(map_k) * buffer2 (restores MAPS_DIM).
		md_zfmacc2(DIMS, dims, strides, buffer3, strides_sc, buffer2, strides_single_map, single_map);
		md_zadd(DIMS, dims, dst, dst, buffer3);
	}

	md_free(single_map);
	md_free(buffer1);
	md_free(buffer2);
	md_free(buffer3);
}

/* Free the wrapper data; maps and sc_op are borrowed and not freed here. */
static void multc_free(const linop_data_t* _data)
{
	const struct multc_s* data = CAST_DOWN(multc_s, _data);
	xfree(data);
}

/*
 * Wrap a single-channel operator into a multi-coil, multi-map operator.
 * Domain gains MAPS_DIM = md; codomain gains nc coils in dim 1.
 */
static struct linop_s* linop_multc_create(long nc, long md, const complex float* maps, const struct linop_s* sc_op)
{
	PTR_ALLOC(struct multc_s, data);
	SET_TYPEID(multc_s, data);

	data->nc = nc;
	data->md = md;
	data->maps = maps;
	data->sc_op = sc_op;

	long* op_inp_dims = (long*) linop_domain(sc_op)->dims;
	long* op_out_dims = (long*) linop_codomain(sc_op)->dims;

	long input_dims[] = { [0 ...
DIMS - 1] = 1 };
	md_copy_dims(DIMS, input_dims, op_inp_dims);
	input_dims[MAPS_DIM] = md;

	long output_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, output_dims, op_out_dims);
	output_dims[1] = nc;

	struct linop_s* E = linop_create(DIMS, output_dims, DIMS, input_dims, CAST_UP(PTR_PASS(data)), multc_apply, multc_adjoint, multc_normal, NULL, multc_free);
	return E;
}

/* Resize operator: zero-pad/crop the readout from sx to wx. */
static const struct linop_s* linop_wavereshape_create(long wx, long sx, long sy, long sz, long nc, long tk)
{
	long input_dims[] = { [0 ... DIMS - 1] = 1};
	input_dims[0] = sx;
	input_dims[1] = sy;
	input_dims[2] = sz;
	input_dims[3] = nc;
	input_dims[6] = tk;

	long output_dims[DIMS];
	md_copy_dims(DIMS, output_dims, input_dims);
	output_dims[0] = wx;

	struct linop_s* R = linop_resize_create(DIMS, output_dims, input_dims);
	return R;
}

/* Fx operator: FFT along the readout (centered variant selectable). */
static const struct linop_s* linop_fx_create(long wx, long sy, long sz, long nc, long tk, bool centered)
{
	long dims[] = { [0 ... DIMS - 1] = 1};
	dims[0] = wx;
	dims[1] = sy;
	dims[2] = sz;
	dims[3] = nc;
	dims[6] = tk;

	struct linop_s* Fx = NULL;
	if (centered)
		Fx = linop_fftc_create(DIMS, dims, READ_FLAG);
	else
		Fx = linop_fft_create(DIMS, dims, READ_FLAG);

	return Fx;
}

/* Wave operator: point-wise multiply by the wave PSF (per-coefficient
 * PSF when psf_tk > 1, i.e. the PSF carries a COEFF_DIM axis). */
static const struct linop_s* linop_wave_create(long wx, long sy, long sz, long nc, long tk, long psf_tk, complex float* psf)
{
	long dims[] = { [0 ... DIMS - 1] = 1};
	dims[0] = wx;
	dims[1] = sy;
	dims[2] = sz;
	dims[3] = nc;
	dims[6] = tk;

	return (psf_tk > 1) ?
		linop_cdiag_create(DIMS, dims, FFT_FLAGS | COEFF_FLAG, psf) :
		linop_cdiag_create(DIMS, dims, FFT_FLAGS, psf);
}

/* Fyz operator: FFT along both phase-encode directions. */
static const struct linop_s* linop_fyz_create(long wx, long sy, long sz, long nc, long tk, bool centered)
{
	long dims[] = { [0 ... DIMS - 1] = 1};
	dims[0] = wx;
	dims[1] = sy;
	dims[2] = sz;
	dims[3] = nc;
	dims[6] = tk;

	struct linop_s* Fyz = NULL;
	if (centered)
		Fyz = linop_fftc_create(DIMS, dims, PHS1_FLAG|PHS2_FLAG);
	else
		Fyz = linop_fft_create(DIMS, dims, PHS1_FLAG|PHS2_FLAG);

	return Fyz;
}

/* Construction sampling temporal kernel.
 * For every (y, z) location, builds the tk x tk matrix
 * Phi^H diag(mask(y,z,:)) Phi by probing with unit coefficient vectors,
 * then permutes the result into kern_dims layout. */
static void construct_kernel( long mask_dims[DIMS], complex float* mask, long phi_dims[DIMS], complex float* phi, long kern_dims[DIMS], complex float* kern)
{
	long sy = mask_dims[1];
	long sz = mask_dims[2];
	long tf = phi_dims[5];		// number of echoes/time frames
	long tk = phi_dims[6];		// number of temporal coefficients

	long cvec_dims[] = { [0 ... DIMS - 1] = 1 };
	cvec_dims[6] = tk;
	long cvec_str[DIMS];
	md_calc_strides(DIMS, cvec_str, cvec_dims, CFL_SIZE);
	complex float cvec[tk];		// unit probe vector in coefficient space

	long tvec_dims[] = { [0 ... DIMS - 1] = 1 };
	tvec_dims[5] = tf;
	long tvec_str[DIMS];
	md_calc_strides(DIMS, tvec_str, tvec_dims, CFL_SIZE);
	complex float mvec[tf];		// sampling mask along time for this (y, z)
	complex float tvec1[tf];	// Phi * cvec
	complex float tvec2[tf];	// mask .* Phi * cvec

	long phi_str[DIMS];
	md_calc_strides(DIMS, phi_str, phi_dims, CFL_SIZE);

	long out_dims[] = { [0 ...
DIMS - 1] = 1 }; out_dims[0] = tk; out_dims[1] = sy; out_dims[2] = sz; out_dims[3] = tk; complex float* out = md_calloc(DIMS, out_dims, CFL_SIZE); for (int y = 0; y < sy; y ++) { for (int z = 0; z < sz; z ++) { for (int t = 0; t < tf; t ++) mvec[t] = mask[(y + sy * z) + (sy * sz) * t]; for (int t = 0; t < tk; t ++) { cvec[t] = 1; md_clear(DIMS, tvec_dims, tvec1, CFL_SIZE); md_zfmac2(DIMS, phi_dims, tvec_str, tvec1, cvec_str, cvec, phi_str, phi); md_clear(DIMS, tvec_dims, tvec2, CFL_SIZE); md_zfmac2(DIMS, tvec_dims, tvec_str, tvec2, tvec_str, tvec1, tvec_str, mvec); md_clear(DIMS, cvec_dims, out + y * tk + z * sy * tk + t * sy * sz * tk, CFL_SIZE); md_zfmacc2(DIMS, phi_dims, cvec_str, out + y * tk + z * sy * tk + t * sy * sz * tk, tvec_str, tvec2, phi_str, phi); cvec[t] = 0; } } } unsigned int permute_order[DIMS] = {4, 1, 2, 5, 6, 7, 3, 0}; for (unsigned int i = 8; i < DIMS; i++) permute_order[i] = i; md_permute(DIMS, permute_order, kern_dims, kern, out_dims, out, CFL_SIZE); md_free(out); } static void fftmod_apply(long sy, long sz, long reorder_dims[DIMS], complex float* reorder, long table_dims[DIMS], complex float* table, long maps_dims[DIMS], complex float* maps) { long wx = table_dims[0]; long nc = table_dims[1]; fftmod(DIMS, table_dims, READ_FLAG, table, table); fftmod(DIMS, maps_dims, FFT_FLAGS, maps, maps); long y = -1; long z = -1; double dy = ((double) sy/2)/((double) sy); double dz = ((double) sz/2)/((double) sz); complex float py = 1; complex float pz = 1; long dims[] = { [0 ... 
DIMS] = 1}; dims[0] = wx; dims[1] = nc; long n = reorder_dims[0]; for (long k = 0; k < n; k++) { y = lround(creal(reorder[k])); z = lround(creal(reorder[k + n])); py = cexp(2.i * M_PI * dy * y); pz = cexp(2.i * M_PI * dz * z); md_zsmul(DIMS, dims, table + k * wx * nc, table + k * wx * nc, py * pz); } } enum algo_t { CG, IST, FISTA }; int main_wshfl(int argc, char* argv[]) { double start_time = timestamp(); float lambda = 1E-5; int maxiter = 300; int blksize = 8; float step = 0.5; float tol = 1.E-3; bool llr = false; bool wav = false; bool fista = false; bool hgwld = false; float cont = 1; float eval = -1; const char* fwd = NULL; const char* x0 = NULL; int gpun = -1; bool dcx = false; bool pf = false; const struct opt_s opts[] = { OPT_FLOAT( 'r', &lambda, "lambda", "Soft threshold lambda for wavelet or locally low rank."), OPT_INT( 'b', &blksize, "blkdim", "Block size for locally low rank."), OPT_INT( 'i', &maxiter, "mxiter", "Maximum number of iterations."), OPT_FLOAT( 's', &step, "stepsz", "Step size for iterative method."), OPT_FLOAT( 'c', &cont, "cntnu", "Continuation value for IST/FISTA."), OPT_FLOAT( 't', &tol, "toler", "Tolerance convergence condition for iterative method."), OPT_FLOAT( 'e', &eval, "eigvl", "Maximum eigenvalue of normal operator, if known."), OPT_STRING('F', &fwd, "frwrd", "Go from shfl-coeffs to data-table. 
Pass in coeffs path."), OPT_STRING('O', &x0, "initl", "Initialize reconstruction with guess."), OPT_INT( 'g', &gpun, "gpunm", "GPU device number."), OPT_SET( 'f', &fista, "Reconstruct using FISTA instead of IST."), OPT_SET( 'H', &hgwld, "Use hogwild in IST/FISTA."), OPT_SET( 'v', &dcx, "Split coefficients to real and imaginary components."), OPT_SET( 'w', &wav, "Use wavelet."), OPT_SET( 'l', &llr, "Use locally low rank across temporal coefficients."), OPT_SET( 'p', &pf, "Use locally low rank and real-imaginary components for partial fourier."), }; cmdline(&argc, argv, 6, 6, usage_str, help_str, ARRAY_SIZE(opts), opts); if (pf) dcx = true; debug_printf(DP_INFO, "Loading data... "); long maps_dims[DIMS]; complex float* maps = load_cfl(argv[1], DIMS, maps_dims); long wave_dims[DIMS]; complex float* wave = load_cfl(argv[2], DIMS, wave_dims); long phi_dims[DIMS]; complex float* phi = load_cfl(argv[3], DIMS, phi_dims); long reorder_dims[DIMS]; complex float* reorder = load_cfl(argv[4], DIMS, reorder_dims); long table_dims[DIMS]; complex float* table = load_cfl(argv[5], DIMS, table_dims); debug_printf(DP_INFO, "Done.\n"); if (gpun >= 0) num_init_gpu_device(gpun); else num_init(); int wx = wave_dims[0]; int sx = maps_dims[0]; int sy = maps_dims[1]; int sz = maps_dims[2]; int nc = maps_dims[3]; int md = maps_dims[4]; int tf = phi_dims[5]; int tk = phi_dims[6]; debug_printf(DP_INFO, "Constructing sampling mask from reorder table... "); long mask_dims[] = { [0 ... DIMS - 1] = 1 }; mask_dims[1] = sy; mask_dims[2] = sz; mask_dims[5] = tf; complex float* mask = md_calloc(DIMS, mask_dims, CFL_SIZE); construct_mask(reorder_dims, reorder, mask_dims, mask); debug_printf(DP_INFO, "Done.\n"); debug_printf(DP_INFO, "Constructing sampling-temporal kernel... "); long kernel_dims[] = { [0 ... 
DIMS - 1] = 1 };
	kernel_dims[1] = sy;
	kernel_dims[2] = sz;
	kernel_dims[6] = tk;
	kernel_dims[7] = tk;
	complex float* kernel = md_calloc(DIMS, kernel_dims, CFL_SIZE);
	construct_kernel(mask_dims, mask, phi_dims, phi, kernel_dims, kernel);
	md_free(mask);
	debug_printf(DP_INFO, "Done.\n");

	// Output coefficient dimensions; dim 8 holds real/imag when split.
	long coeff_dims[] = { [0 ... DIMS - 1] = 1 };
	coeff_dims[0] = sx;
	coeff_dims[1] = sy;
	coeff_dims[2] = sz;
	coeff_dims[4] = md;
	coeff_dims[6] = tk;
	coeff_dims[8] = dcx ? 2 : 1;

	debug_printf(DP_INFO, "Creating single channel linear operators:\n");

	double t1;
	double t2;

	// Forward model: A_sc = K . Fyz . W . Fx . R (single channel).
	t1 = timestamp();
	const struct linop_s* R = linop_wavereshape_create(wx, sx, sy, sz, 1, tk);
	t2 = timestamp();
	debug_printf(DP_INFO, "\tR: %f seconds.\n", t2 - t1);

	t1 = timestamp();
	const struct linop_s* Fx = linop_fx_create(wx, sy, sz, 1, tk, false);
	t2 = timestamp();
	debug_printf(DP_INFO, "\tFx: %f seconds.\n", t2 - t1);

	t1 = timestamp();
	const struct linop_s* W = linop_wave_create(wx, sy, sz, 1, tk, wave_dims[COEFF_DIM], wave);
	t2 = timestamp();
	debug_printf(DP_INFO, "\tW: %f seconds.\n", t2 - t1);

	t1 = timestamp();
	const struct linop_s* Fyz = linop_fyz_create(wx, sy, sz, 1, tk, false);
	t2 = timestamp();
	debug_printf(DP_INFO, "\tFyz: %f seconds.\n", t2 - t1);

	t1 = timestamp();
	long single_channel_table_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, single_channel_table_dims, table_dims);
	single_channel_table_dims[1] = 1;
	const struct linop_s* K = linop_kern_create(gpun >= 0, reorder_dims, reorder, phi_dims, phi, kernel_dims, kernel, single_channel_table_dims);
	t2 = timestamp();
	debug_printf(DP_INFO, "\tK: %f seconds.\n", t2 - t1);

	struct linop_s* A_sc = linop_chain_FF(linop_chain_FF(linop_chain_FF(linop_chain_FF( R, Fx), W), Fyz), K);

	debug_printf(DP_INFO, "Single channel forward operator information:\n");
	print_opdims(A_sc);

	// Estimate the largest eigenvalue of A^H A for the step size,
	// unless the user supplied one.
	if (eval < 0)
#ifdef USE_CUDA
		eval = (gpun >= 0) ? estimate_maxeigenval_gpu(A_sc->normal) : estimate_maxeigenval(A_sc->normal);
#else
		eval = estimate_maxeigenval(A_sc->normal);
#endif
	debug_printf(DP_INFO, "\tMax eval: %.2e\n", eval);

	step /= eval;

	struct linop_s* A = linop_multc_create(nc, md, maps, A_sc);
	debug_printf(DP_INFO, "Overall forward linear operator information:\n");
	print_opdims(A);

	// Forward-only mode: apply the (centered-FFT) model to given
	// coefficients and write the synthesized data table, then exit.
	if (fwd != NULL) {

		debug_printf(DP_INFO, "Going from coefficients to data table... ");
		complex float* coeffs_to_fwd = load_cfl(fwd, DIMS, coeff_dims);
		complex float* table_forward = create_cfl(argv[6], DIMS, table_dims);

		const struct linop_s* R = linop_wavereshape_create(wx, sx, sy, sz, 1, tk);
		const struct linop_s* CFx = linop_fx_create( wx, sy, sz, 1, tk, true);
		const struct linop_s* W = linop_wave_create(wx, sy, sz, 1, tk, wave_dims[COEFF_DIM], wave);
		const struct linop_s* CFyz = linop_fyz_create(wx, sy, sz, 1, tk, true);
		const struct linop_s* K = linop_kern_create(gpun >= 0, reorder_dims, reorder, phi_dims, phi, kernel_dims, kernel, single_channel_table_dims);

		struct linop_s* AC_sc = linop_chain_FF(linop_chain_FF(linop_chain_FF(linop_chain_FF( R, CFx), W), CFyz), K);
		struct linop_s* AC = linop_multc_create(nc, md, maps, AC_sc);

		operator_apply(AC->forward, DIMS, table_dims, table_forward, DIMS, coeff_dims, coeffs_to_fwd);
		debug_printf(DP_INFO, "Done.\n");

		debug_printf(DP_INFO, "Cleaning up... ");
		linop_free(AC);
		linop_free(AC_sc);
		md_free(kernel);
		unmap_cfl(DIMS, maps_dims, maps);
		unmap_cfl(DIMS, wave_dims, wave);
		unmap_cfl(DIMS, phi_dims, phi);
		unmap_cfl(DIMS, reorder_dims, reorder);
		unmap_cfl(DIMS, table_dims, table);
		unmap_cfl(DIMS, table_dims, table_forward);
		debug_printf(DP_INFO, "Done.\n");

		return 0;
	}

	if (dcx) {
		debug_printf(DP_INFO, "\tSplitting result into real and imaginary components.\n");
		struct linop_s* tmp = A;
		struct linop_s* dcxop = linop_decompose_complex_create(DIMS, ITER_DIM, linop_domain(A)->dims);
		A = linop_chain(dcxop, tmp);
		linop_free(dcxop);
		linop_free(tmp);
	}

	debug_printf(DP_INFO, "Normalizing data table and applying fftmod to table and maps... ");
	float norm = md_znorm(DIMS, table_dims, table);
	md_zsmul(DIMS, table_dims, table, table, 1. / norm);
	fftmod_apply(sy, sz, reorder_dims, reorder, table_dims, table, maps_dims, maps);
	debug_printf(DP_INFO, "Done.\n");

	// Choose the regularizer (proximal operator) T.
	const struct operator_p_s* T = NULL;
	long blkdims[MAX_LEV][DIMS];
	long minsize[] = { [0 ... DIMS - 1] = 1 };
	minsize[0] = MIN(sx, 16);
	minsize[1] = MIN(sy, 16);
	minsize[2] = MIN(sz, 16);
	unsigned int WAVFLAG = (sx > 1) * READ_FLAG | (sy > 1) * PHS1_FLAG | (sz > 2) * PHS2_FLAG;

	enum algo_t algo = CG;
	if ((wav) || (llr) || (pf)) {

		algo = (fista) ? FISTA : IST;

		if (wav) {
			debug_printf(DP_INFO, "Creating wavelet threshold operator... ");
			T = prox_wavelet_thresh_create(DIMS, coeff_dims, WAVFLAG, 0u, minsize, lambda, true);
		} else if (llr) {
			debug_printf(DP_INFO, "Creating locally low rank threshold operator across coeff and real-imag... ");
			llr_blkdims(blkdims, ~(COEFF_FLAG | ITER_FLAG), coeff_dims, blksize);
			T = lrthresh_create(coeff_dims, true, ~(COEFF_FLAG | ITER_FLAG), (const long (*)[])blkdims, lambda, false, false, false);
		} else {
			assert(dcx);
			debug_printf(DP_INFO, "Creating locally low rank threshold operator across real-imag... ");
			llr_blkdims(blkdims, ~ITER_FLAG, coeff_dims, blksize);
			T = lrthresh_create(coeff_dims, true, ~ITER_FLAG, (const long (*)[])blkdims, lambda, false, false, false);
		}

		debug_printf(DP_INFO, "Done.\n");
	}

	// Configure the iterative solver selected above.
	italgo_fun2_t italgo = iter2_call_iter;
	struct iter_call_s iter2_data;
	SET_TYPEID(iter_call_s, &iter2_data);
	iter_conf* iconf = CAST_UP(&iter2_data);

	struct iter_conjgrad_conf cgconf = iter_conjgrad_defaults;
	struct iter_fista_conf fsconf = iter_fista_defaults;
	struct iter_ist_conf isconf = iter_ist_defaults;

	switch(algo) {

	case IST:
		debug_printf(DP_INFO, "Using IST.\n");
		debug_printf(DP_INFO, "\tLambda: %0.2e\n", lambda);
		debug_printf(DP_INFO, "\tMaximum iterations: %d\n", maxiter);
		debug_printf(DP_INFO, "\tStep size: %0.2e\n", step);
		debug_printf(DP_INFO, "\tHogwild: %d\n", (int) hgwld);
		debug_printf(DP_INFO, "\tTolerance: %0.2e\n", tol);
		debug_printf(DP_INFO, "\tContinuation: %0.2e\n", cont);

		isconf = iter_ist_defaults;
		isconf.step = step;
		isconf.maxiter = maxiter;
		isconf.tol = tol;
		isconf.continuation = cont;
		isconf.hogwild = hgwld;
		iter2_data.fun = iter_ist;
		iter2_data._conf = CAST_UP(&isconf);
		break;

	case FISTA:
		debug_printf(DP_INFO, "Using FISTA.\n");
		debug_printf(DP_INFO, "\tLambda: %0.2e\n", lambda);
		debug_printf(DP_INFO, "\tMaximum iterations: %d\n", maxiter);
		debug_printf(DP_INFO, "\tStep size: %0.2e\n", step);
		debug_printf(DP_INFO, "\tHogwild: %d\n", (int) hgwld);
		debug_printf(DP_INFO, "\tTolerance: %0.2e\n", tol);
		debug_printf(DP_INFO, "\tContinuation: %0.2e\n", cont);

		fsconf = iter_fista_defaults;
		fsconf.maxiter = maxiter;
		fsconf.step = step;
		fsconf.hogwild = hgwld;
		fsconf.tol = tol;
		fsconf.continuation = cont;
		iter2_data.fun = iter_fista;
		iter2_data._conf = CAST_UP(&fsconf);
		break;

	default:
	case CG:
		debug_printf(DP_INFO, "Using CG.\n");
		debug_printf(DP_INFO, "\tMaximum iterations: %d\n", maxiter);
		debug_printf(DP_INFO, "\tTolerance: %0.2e\n", tol);

		cgconf = iter_conjgrad_defaults;
		cgconf.maxiter = maxiter;
		cgconf.l2lambda = 0;
		cgconf.tol = tol;
iter2_data.fun = iter_conjgrad; iter2_data._conf = CAST_UP(&cgconf); break; } complex float* init = NULL; if (x0 != NULL) { debug_printf(DP_INFO, "Loading in initial guess... "); init = load_cfl(x0, DIMS, coeff_dims); debug_printf(DP_INFO, "Done.\n"); } debug_printf(DP_INFO, "Reconstruction... "); complex float* recon = create_cfl(argv[6], DIMS, coeff_dims); struct lsqr_conf lsqr_conf = { 0., gpun >= 0 }; double recon_start = timestamp(); const struct operator_p_s* J = lsqr2_create(&lsqr_conf, italgo, iconf, (const float*) init, A, NULL, 1, &T, NULL, NULL); operator_p_apply(J, 1., DIMS, coeff_dims, recon, DIMS, table_dims, table); md_zsmul(DIMS, coeff_dims, recon, recon, norm); double recon_end = timestamp(); debug_printf(DP_INFO, "Done.\nReconstruction time: %f seconds.\n", recon_end - recon_start); debug_printf(DP_INFO, "Cleaning up and saving result... "); operator_p_free(J); linop_free(A); linop_free(A_sc); md_free(kernel); unmap_cfl(DIMS, maps_dims, maps); unmap_cfl(DIMS, wave_dims, wave); unmap_cfl(DIMS, phi_dims, phi); unmap_cfl(DIMS, reorder_dims, reorder); unmap_cfl(DIMS, table_dims, table); unmap_cfl(DIMS, coeff_dims, recon); if (x0 != NULL) unmap_cfl(DIMS, coeff_dims, init); debug_printf(DP_INFO, "Done.\n"); double end_time = timestamp(); debug_printf(DP_INFO, "Total time: %f seconds.\n", end_time - start_time); return 0; }
pr32362-2.c
/* PR middle-end/32362 */ /* { dg-do run } */ #include <omp.h> #include <stdlib.h> int a = 2, b = 4; int main () { int n[4] = { -1, -1, -1, -1 }; omp_set_num_threads (4); omp_set_dynamic (0); omp_set_nested (1); #pragma omp parallel private(b) { b = omp_get_thread_num (); #pragma omp parallel firstprivate(a) { a = (omp_get_thread_num () + a) + 1; if (b == omp_get_thread_num ()) n[omp_get_thread_num ()] = a + (b << 4); } } if (n[0] != 3) abort (); if (n[3] != -1 && (n[1] != 0x14 || n[2] != 0x25 || n[3] != 0x36)) abort (); return 0; }
pfmg_setup_rap5.c
/*BHEADER**********************************************************************
 * Copyright (c) 2008, Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE.  See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision$
 ***********************************************************************EHEADER*/

#include "_hypre_struct_ls.h"
#include "pfmg.h"

/*--------------------------------------------------------------------------
 * Macro to "change coordinates".  This routine is written as though
 * coarsening is being done in the y-direction.  This macro is used to
 * allow for coarsening to be done in the x-direction also.
 *--------------------------------------------------------------------------*/

#define MapIndex(in_index, cdir, out_index) \
   hypre_IndexD(out_index, 2) = hypre_IndexD(in_index, 2); \
   cdir = (cdir + 1) % 2; \
   hypre_IndexD(out_index, cdir) = hypre_IndexD(in_index, 1); \
   cdir = (cdir + 1) % 2; \
   hypre_IndexD(out_index, cdir) = hypre_IndexD(in_index, 0);

/*--------------------------------------------------------------------------
 * hypre_PFMGCreateCoarseOp5
 *    Sets up new coarse grid operator structure.  Fine grid
 *    operator is 5pt and so is coarse, i.e. non-Galerkin.
 *--------------------------------------------------------------------------*/

hypre_StructMatrix *
hypre_PFMGCreateCoarseOp5( hypre_StructMatrix *R,
                           hypre_StructMatrix *A,
                           hypre_StructMatrix *P,
                           hypre_StructGrid   *coarse_grid,
                           HYPRE_Int           cdir        )
{
   hypre_StructMatrix    *RAP;

   hypre_Index           *RAP_stencil_shape;
   hypre_StructStencil   *RAP_stencil;
   HYPRE_Int              RAP_stencil_size;
   HYPRE_Int              RAP_stencil_dim;
   HYPRE_Int              RAP_num_ghost[] = {1, 1, 1, 1, 1, 1};

   hypre_Index            index_temp;
   HYPRE_Int              j, i;
   HYPRE_Int              stencil_rank;

   RAP_stencil_dim = 2;

   /*-----------------------------------------------------------------------
    * Define RAP_stencil
    *-----------------------------------------------------------------------*/

   stencil_rank = 0;

   /*-----------------------------------------------------------------------
    * non-symmetric case
    *-----------------------------------------------------------------------*/

   if (!hypre_StructMatrixSymmetric(A))
   {
      /*--------------------------------------------------------------------
       * 5 point coarse grid stencil
       *--------------------------------------------------------------------*/
      RAP_stencil_size = 5;
      RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);
      for (j = -1; j < 2; j++)
      {
         for (i = -1; i < 2; i++)
         {
            /*--------------------------------------------------------------
             * Storage for 5 elements (c,w,e,n,s)
             * (i*j == 0 keeps exactly the axis-aligned offsets.)
             *--------------------------------------------------------------*/
            if (i*j == 0)
            {
               hypre_SetIndex3(index_temp,i,j,0);
               MapIndex(index_temp, cdir, RAP_stencil_shape[stencil_rank]);
               stencil_rank++;
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * symmetric case
    *-----------------------------------------------------------------------*/

   else
   {
      /*--------------------------------------------------------------------
       * 5 point coarse grid stencil
       * Only store the lower triangular part + diagonal = 3 entries,
       * lower triangular means the lower triangular part on the matrix
       * in the standard lexicographic ordering.
       *--------------------------------------------------------------------*/
      RAP_stencil_size = 3;
      RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);
      for (j = -1; j < 1; j++)
      {
         for (i = -1; i < 1; i++)
         {
            /*--------------------------------------------------------------
             * Store 3 elements in (c,w,s)
             *--------------------------------------------------------------*/
            if( i*j == 0 )
            {
               hypre_SetIndex3(index_temp,i,j,0);
               MapIndex(index_temp, cdir, RAP_stencil_shape[stencil_rank]);
               stencil_rank++;
            }
         }
      }
   }

   RAP_stencil = hypre_StructStencilCreate(RAP_stencil_dim, RAP_stencil_size,
                                           RAP_stencil_shape);

   RAP = hypre_StructMatrixCreate(hypre_StructMatrixComm(A),
                                  coarse_grid, RAP_stencil);

   hypre_StructStencilDestroy(RAP_stencil);

   /*-----------------------------------------------------------------------
    * Coarse operator is symmetric iff fine operator is
    *-----------------------------------------------------------------------*/
   hypre_StructMatrixSymmetric(RAP) = hypre_StructMatrixSymmetric(A);

   /*-----------------------------------------------------------------------
    * Set number of ghost points - one on each boundary
    *-----------------------------------------------------------------------*/
   hypre_StructMatrixSetNumGhost(RAP, RAP_num_ghost);

   return RAP;
}

/*--------------------------------------------------------------------------
 * hypre_PFMGBuildCoarseOp5
 *    Sets up new coarse grid operator structure.  Fine grid operator is 5pt
 *    and so is coarse, i.e. non-Galerkin.
 *
 *    Uses the non-Galerkin strategy from Ashby & Falgout's original ParFlow
 *    algorithm.  For constant_coefficient==2, see [issue663].
*--------------------------------------------------------------------------*/

/*
 * Build the coarse-grid operator RAP for PFMG from a symmetric 5-point fine
 * grid operator A, interpolation P and restriction R, coarsening in
 * direction cdir with coarse index space (cindex, cstride).
 *
 * Three storage regimes are handled, keyed by the matrices' "constant
 * coefficient" mode (all of A, P, R, RAP must agree, see the asserts):
 *   0 - fully variable coefficients: a Galerkin triple product evaluated
 *       pointwise over each coarse box;
 *   1 - fully constant coefficients: a single stencil entry per direction
 *       (index [0]) is computed;
 *   2 - constant off-diagonal / variable diagonal: constant entries are set
 *       once, the variable center is swept with a BoxLoop, and a boundary
 *       correction is subtracted on faces of the grid in direction cdir.
 *
 * Returns hypre_error_flag (accumulated hypre error state).
 */
HYPRE_Int
hypre_PFMGBuildCoarseOp5( hypre_StructMatrix *A,
                          hypre_StructMatrix *P,
                          hypre_StructMatrix *R,
                          HYPRE_Int           cdir,
                          hypre_Index         cindex,
                          hypre_Index         cstride,
                          hypre_StructMatrix *RAP )
{
   HYPRE_Int ndim = hypre_StructMatrixNDim(A);

   hypre_Index           index;
   hypre_Index           index_temp;

   hypre_StructGrid     *fgrid;
   hypre_BoxArray       *fgrid_boxes;
   hypre_Box            *fgrid_box;
   HYPRE_Int            *fgrid_ids;
   hypre_StructGrid     *cgrid;
   hypre_BoxArray       *cgrid_boxes;
   hypre_Box            *cgrid_box;
   HYPRE_Int            *cgrid_ids;

   hypre_IndexRef        cstart, bfstart, stridef;
   hypre_Index           fstart, bcstart, stridec;
   hypre_Index           loop_size;

   HYPRE_Int             constant_coefficient;

   HYPRE_Int             fi, ci, fbi;

   hypre_Box            *A_dbox;
   hypre_Box            *P_dbox;
   hypre_Box            *RAP_dbox;

   hypre_BoxArray       *bdy_boxes, *tmp_boxes;
   hypre_Box            *bdy_box, *fcbox;

   HYPRE_Real           *pb, *pa;

   HYPRE_Real           *a_cc, *a_cw, *a_ce, *a_cb, *a_ca;

   HYPRE_Real           *rap_cc, *rap_cw, *rap_ce;
   HYPRE_Real           *rap_cb, *rap_ca;
   HYPRE_Real            west, east;
   HYPRE_Real            center_int, center_bdy;

   HYPRE_Int             iA, iAm1, iAp1;
   HYPRE_Int             iAc;
   HYPRE_Int             iP, iPm1, iPp1;

   HYPRE_Int             OffsetA;
   HYPRE_Int             OffsetP;

   /* Fine grid is traversed with the coarsening stride; coarse grid with
    * unit stride, so one coarse point maps to one fine point per step. */
   stridef = cstride;
   hypre_SetIndex3(stridec, 1, 1, 1);

   fgrid = hypre_StructMatrixGrid(A);
   fgrid_boxes = hypre_StructGridBoxes(fgrid);
   fgrid_ids = hypre_StructGridIDs(fgrid);

   cgrid = hypre_StructMatrixGrid(RAP);
   cgrid_boxes = hypre_StructGridBoxes(cgrid);
   cgrid_ids = hypre_StructGridIDs(cgrid);

   constant_coefficient = hypre_StructMatrixConstantCoefficient(RAP);
   hypre_assert( hypre_StructMatrixConstantCoefficient(A) == constant_coefficient );
   if ( constant_coefficient==0 )
   {
      hypre_assert( hypre_StructMatrixConstantCoefficient(R) == 0 );
      hypre_assert( hypre_StructMatrixConstantCoefficient(P) == 0 );
   }
   else /* 1 or 2 */
   {
      hypre_assert( hypre_StructMatrixConstantCoefficient(R) == 1 );
      hypre_assert( hypre_StructMatrixConstantCoefficient(P) == 1 );
   }

   fcbox = hypre_BoxCreate(ndim);
   bdy_boxes = hypre_BoxArrayCreate(0, ndim);
   tmp_boxes = hypre_BoxArrayCreate(0, ndim);

   fi = 0;
   hypre_ForBoxI(ci, cgrid_boxes)
   {
      /* Advance fi until the fine box with the same global id as the
       * current coarse box is found (ids are sorted consistently). */
      while (fgrid_ids[fi] != cgrid_ids[ci])
      {
         fi++;
      }

      cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
      fgrid_box = hypre_BoxArrayBox(fgrid_boxes, fi);

      cstart = hypre_BoxIMin(cgrid_box);
      hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);

      A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
      P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
      RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);

      /*-----------------------------------------------------------------
       * Extract pointers for interpolation operator:
       * pb is pointer for weight for f-point below c-point
       * pa is pointer for weight for f-point above c-point
       *-----------------------------------------------------------------*/

      hypre_SetIndex3(index_temp,0,-1,0);
      MapIndex(index_temp, cdir, index);
      pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);

      hypre_SetIndex3(index_temp,0,1,0);
      MapIndex(index_temp, cdir, index);
      /* pb is shifted back by one P-box offset so pb[iP] addresses the
       * weight belonging to the f-point below the c-point. */
      pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index)
         - hypre_BoxOffsetDistance(P_dbox, index);

      /*-----------------------------------------------------------------
       * Extract pointers for 5-point fine grid operator:
       *
       * a_cc is pointer for center coefficient
       * a_cw is pointer for west coefficient
       * a_ce is pointer for east coefficient
       * a_cb is pointer for below coefficient
       * a_ca is pointer for above coefficient
       *-----------------------------------------------------------------*/

      hypre_SetIndex3(index_temp,0,0,0);
      MapIndex(index_temp, cdir, index);
      a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      hypre_SetIndex3(index_temp,-1,0,0);
      MapIndex(index_temp, cdir, index);
      a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      hypre_SetIndex3(index_temp,1,0,0);
      MapIndex(index_temp, cdir, index);
      a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      hypre_SetIndex3(index_temp,0,-1,0);
      MapIndex(index_temp, cdir, index);
      a_cb = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      hypre_SetIndex3(index_temp,0,1,0);
      MapIndex(index_temp, cdir, index);
      a_ca = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      /*-----------------------------------------------------------------
       * Extract pointers for coarse grid operator
       * rap_cc is pointer for center coefficient (etc.)
       *-----------------------------------------------------------------*/

      hypre_SetIndex3(index_temp,0,0,0);
      MapIndex(index_temp, cdir, index);
      rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      hypre_SetIndex3(index_temp,-1,0,0);
      MapIndex(index_temp, cdir, index);
      rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      hypre_SetIndex3(index_temp,1,0,0);
      MapIndex(index_temp, cdir, index);
      rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      hypre_SetIndex3(index_temp,0,-1,0);
      MapIndex(index_temp, cdir, index);
      rap_cb = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      hypre_SetIndex3(index_temp,0,1,0);
      MapIndex(index_temp, cdir, index);
      rap_ca = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      /*-----------------------------------------------------------------
       * Define offsets for fine grid stencil and interpolation
       *
       * In the BoxLoop below I assume iA and iP refer to data associated
       * with the point which we are building the stencil for. The below
       * Offsets are used in refering to data associated with other points.
       *-----------------------------------------------------------------*/

      hypre_SetIndex3(index_temp,0,1,0);
      MapIndex(index_temp, cdir, index);

      OffsetP = hypre_BoxOffsetDistance(P_dbox,index);
      OffsetA = hypre_BoxOffsetDistance(A_dbox,index);

      /*--------------------------------------------------------------
       * Loop for symmetric 5-point fine grid operator; produces a
       * symmetric 5-point coarse grid operator.
       *--------------------------------------------------------------*/

      if ( constant_coefficient==0 )
      {
         /* Variable coefficients: pointwise Galerkin product over the
          * coarse box. One fine point is visited per coarse point. */
         hypre_BoxGetSize(cgrid_box, loop_size);

         hypre_BoxLoop3Begin(hypre_StructMatrixNDim(A), loop_size,
                             P_dbox, cstart, stridec, iP,
                             A_dbox, fstart, stridef, iA,
                             RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iA,iAc,iAm1,iAp1,iPm1,iPp1,west,east) HYPRE_SMP_SCHEDULE
#endif
         hypre_BoxLoop3For(iP, iA, iAc)
         {
            iAm1 = iA - OffsetA;
            iAp1 = iA + OffsetA;

            iPm1 = iP - OffsetP;
            iPp1 = iP + OffsetP;

            rap_cb[iAc] = a_cb[iA] * pa[iPm1];
            rap_ca[iAc] = a_ca[iA] * pb[iPp1];

            west = a_cw[iA] + 0.5 * a_cw[iAm1] + 0.5 * a_cw[iAp1];
            east = a_ce[iA] + 0.5 * a_ce[iAm1] + 0.5 * a_ce[iAp1];

            /*-----------------------------------------------------
             * Prevent non-zero entries reaching off grid
             *-----------------------------------------------------*/
            if(a_cw[iA] == 0.0) west = 0.0;
            if(a_ce[iA] == 0.0) east = 0.0;

            rap_cw[iAc] = west;
            rap_ce[iAc] = east;

            rap_cc[iAc] = a_cc[iA] + a_cw[iA] + a_ce[iA]
               + a_cb[iA] * pb[iP] + a_ca[iA] * pa[iP]
               - west - east;
         }
         hypre_BoxLoop3End(iP, iA, iAc);
      }

      else if ( constant_coefficient==1 )
      {
         /* Fully constant coefficients: one stencil entry per direction. */
         rap_cb[0] = rap_ca[0] = a_cb[0] * pa[0];
         rap_cw[0] = rap_ce[0] = 2.0*a_cw[0];
         rap_cc[0] = a_cc[0] - 2.0*( a_cw[0] - rap_cb[0] );
      }

      else if ( constant_coefficient==2 )
      {
         /* NOTE: This does not reduce to either of the above operators unless
          * the row sum is zero and the interpolation weights are 1/2 */

         /* Constant off-diagonal entries, variable center. */
         rap_cb[0] = rap_ca[0] = 0.5*a_cb[0];
         rap_cw[0] = rap_ce[0] = 2.0*a_cw[0];

         center_int = 3.0*a_cb[0];
         center_bdy = 0.5*a_cb[0] + (a_cw[0] + a_cb[0]);

         hypre_BoxGetSize(cgrid_box, loop_size);

         hypre_BoxLoop2Begin(hypre_StructMatrixNDim(A), loop_size,
                             A_dbox, fstart, stridef, iA,
                             RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iA,iAc) HYPRE_SMP_SCHEDULE
#endif
         hypre_BoxLoop2For(iA, iAc)
         {
            rap_cc[iAc] = 2.0*a_cc[iA] + center_int;
         }
         hypre_BoxLoop2End(iA, iAc);

         /* Map the coarse box back to the fine index space so that grid
          * boundaries in direction cdir can be detected. */
         hypre_CopyBox(cgrid_box, fcbox);
         hypre_StructMapCoarseToFine(hypre_BoxIMin(fcbox), cindex, cstride,
                                     hypre_BoxIMin(fcbox));
         hypre_StructMapCoarseToFine(hypre_BoxIMax(fcbox), cindex, cstride,
                                     hypre_BoxIMax(fcbox));

         /* Collect boundary faces of this box on both sides of cdir. */
         hypre_BoxArraySetSize(bdy_boxes, 0);
         if (hypre_BoxIMinD(fcbox, cdir) == hypre_BoxIMinD(fgrid_box, cdir))
         {
            hypre_BoxBoundaryIntersect(fcbox, fgrid, cdir, -1, bdy_boxes);
         }
         if (hypre_BoxIMaxD(fcbox, cdir) == hypre_BoxIMaxD(fgrid_box, cdir))
         {
            hypre_BoxBoundaryIntersect(fcbox, fgrid, cdir, 1, tmp_boxes);
            hypre_AppendBoxArray(tmp_boxes, bdy_boxes);
         }

         /* Subtract the boundary correction from the center coefficient
          * at all boundary points. */
         hypre_ForBoxI(fbi, bdy_boxes)
         {
            bdy_box = hypre_BoxArrayBox(bdy_boxes, fbi);
            hypre_BoxGetSize(bdy_box, loop_size);
            bfstart = hypre_BoxIMin(bdy_box);
            hypre_StructMapFineToCoarse(bfstart, cindex, cstride, bcstart);

            hypre_BoxLoop2Begin(hypre_StructMatrixNDim(A), loop_size,
                                A_dbox, bfstart, stridef, iA,
                                RAP_dbox, bcstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iA,iAc) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop2For(iA, iAc)
            {
               rap_cc[iAc] -= 0.5*a_cc[iA] + center_bdy;
            }
            hypre_BoxLoop2End(iA, iAc);
         }
      }

   } /* end ForBoxI */

   hypre_BoxDestroy(fcbox);
   hypre_BoxArrayDestroy(bdy_boxes);
   hypre_BoxArrayDestroy(tmp_boxes);

   return hypre_error_flag;
}
MPI.h
/** * @file * This file is part of SeisSol. * * @author Sebastian Rettenberger (sebastian.rettenberger AT tum.de, http://www5.in.tum.de/wiki/index.php/Sebastian_Rettenberger) * * @section LICENSE * Copyright (c) 2015-2016, SeisSol Group * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* * @section DESCRIPTION * MPI Wrapper */ #ifndef MPI_H #define MPI_H #ifndef USE_MPI #include "MPIDummy.h" #else // USE_MPI #include <mpi.h> #include "utils/logger.h" #include "MPIBasic.h" #ifdef ACL_DEVICE #include <cstdlib> #include <string> #include <sstream> #include <device.h> #endif // ACL_DEVICE #endif // USE_MPI namespace seissol { #ifndef USE_MPI typedef MPIDummy MPI; #else // USE_MPI /** * MPI handling. * * Make sure only one instance of this class exists! */ class MPI : public MPIBasic { private: MPI_Comm m_comm; #ifdef ACL_DEVICE int m_localRank{}; int m_localSize{}; int m_deviceId{}; #endif // ACL_DEVICE private: MPI() : m_comm(MPI_COMM_NULL) { } public: ~MPI() { } #ifdef ACL_DEVICE private: /** * @brief Reads and returns environment variables * * Some MPI vendors usually provides env. variables which allows to find out the local rank and size * before calling MPI_Init(...). However, they tend to name these variables differently, i.e. uniquely * for their implementation. Thus, the function take some potential candidates and loop through them and try * to retrieve a value. * * @param candidates a vector of strings with names of possible env. variables * @throws std::string in case if a value cannot get retrieved from a candidate list * @throws std::invalid_argument in case if an env. variable doesn't contain an integer, e.g. char, string, etc. * @throws std::out_of_range in case is an env. variable contains a value bigger that a size of integer * */ static int readValueFromEnvVariables(std::vector<std::string> &candidates) { char* valueStr = nullptr; for (auto envVar: candidates) { valueStr = std::getenv(envVar.c_str()); if (valueStr) break; } if (!valueStr) { std::stringstream stream; stream << "could not detect any env. variable from a list of candidates, namely: "; for (const auto& item: candidates) { stream << item << ", "; } stream << ". 
Please, consider to use any other MPI implementation with an offloading support."; logError() << stream.str(); } return std::stoi(std::string(valueStr)); } public: /** * @brief Inits Device(s). * * Some MPI implementations create a so-called context between GPUs and OS Processes inside of MPI_Init(...). * It results in allocating some memory buffers in memory attached to the nearest NUMA domain * of a core where a process is running. In case of somebody wants to bind a processes in a different way, * e.g. move a process closer to a GPU, it must be done before calling MPI_Init(...) using env. variables * or hwloc library. * * Currently, the function does a simple binding, i.e. a local rank controls the corresponding devices. * For instance, localRank=2 is going to use deviceId=2. The user is responsible for the correct binding. * She/he must refer to a documentation of their job scheduler or MPI implementation to achieve correct * GPU/CPU affinity. Note, one can improve the current binding strategy using hwloc. * See, Professional CUDA programming, subsection Affinity on MPI-CUDA Programs as a reference. * * The function supports the following MPI implementations: OpenMPI, MVAPICH2, IntelMPI * */ void bindRankToDevice() { try { std::vector<std::string> rankEnvVars{{"OMPI_COMM_WORLD_LOCAL_RANK"}, {"MV2_COMM_WORLD_LOCAL_RANK"}, {"SLURM_LOCALID"}, {"PMI_RANK"} }; std::vector<std::string> sizeEnvVars{{"OMPI_COMM_WORLD_LOCAL_SIZE"}, {"MV2_COMM_WORLD_LOCAL_SIZE"}, {"SLURM_NTASKS_PER_NODE"}, {"PMI_SIZE"}}; m_localRank = readValueFromEnvVariables(rankEnvVars); m_localSize = readValueFromEnvVariables(sizeEnvVars); } catch (const std::invalid_argument &err) { logError() << err.what() << ". File: " << __FILE__ << ", line: " << __LINE__; } catch (const std::out_of_range& err) { logError() << err.what() << ". 
File: " << __FILE__ << ", line: " << __LINE__; } device::DeviceInstance& device = device::DeviceInstance::getInstance(); int m_numDevices = device.api->getNumDevices(); if (m_localSize > m_numDevices) { logError() << "Local mpi size (in a compute node) is greater than the number of avaliable devices." << "Over-subscription of devices is currently not supported in Seissol." << "Adjust num. local mpi rank and num. local devices.\n" << "File: " << __FILE__ << ", line: " << __LINE__; } m_deviceId = m_localRank; #ifdef _OPENMP #pragma omp parallel { #pragma omp critical { device.api->setDevice(m_deviceId); } } #else device.api->setDevice(m_deviceId); #endif } int getDeviceID() { return m_deviceId; } #endif // ACL_DEVICE /** * Initialize MPI */ void init(int &argc, char** &argv) { int required = (m_threadsafe ? MPI_THREAD_MULTIPLE : MPI_THREAD_SINGLE); int provided; MPI_Init_thread(&argc, &argv, required, &provided); setComm(MPI_COMM_WORLD); // Test this after setComm() to get the correct m_rank if (required < provided) logWarning(m_rank) << utils::nospace << "Required MPI thread support (" << required << ") is smaller than provided thread support (" << provided << ")."; } void setComm(MPI_Comm comm) { m_comm = comm; MPI_Comm_rank(comm, &m_rank); MPI_Comm_size(comm, &m_size); } /** * @return The main communicator for the application */ MPI_Comm comm() const { return m_comm; } void barrier(MPI_Comm comm) const { MPI_Barrier(comm); } /** * Finalize MPI */ void finalize() { fault.finalize(); MPI_Finalize(); } public: /** The only instance of the class */ static MPI mpi; }; #endif // USE_MPI } #endif // MPI_H
trmv_x_sky_n_lo_conj.c
#include "alphasparse/kernel.h"
#include "alphasparse/opt.h"
#include "alphasparse/util.h"
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * y := alpha * conj(A) * x + beta * y  for a lower-triangular matrix A
 * stored in skyline (SKY) format, traversed column by column.
 *
 * The entries of "column" c occupy values[pointers[c] .. pointers[c+1]-1]
 * and correspond to consecutive rows ending at the diagonal row c, so the
 * last entry of each column is the diagonal element.
 *
 * Only the beta-scaling of y is parallelized; the scatter into y[r] is kept
 * sequential because different columns may update the same row.
 *
 * Returns ALPHA_SPARSE_STATUS_INVALID_VALUE for non-square A or when the
 * kernel is compiled without COMPLEX support (conjugation is meaningless
 * for real data).
 */
static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha,
                                      const ALPHA_SPMAT_SKY *A,
                                      const ALPHA_Number *x,
                                      const ALPHA_Number beta,
                                      ALPHA_Number *y)
{
#ifdef COMPLEX
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;
    if (m != n)
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;

    const ALPHA_INT thread_num = alpha_get_thread_num();

    /* y := beta * y (embarrassingly parallel) */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < m; ++i)
    {
        alpha_mul(y[i], beta, y[i]);
    }

    for (ALPHA_INT c = 0; c < n; ++c)
    {
        const ALPHA_INT col_start = A->pointers[c];
        const ALPHA_INT col_end   = A->pointers[c + 1];

        for (ALPHA_INT ai = col_start; ai < col_end; ++ai)
        {
            /* The last entry (ai == col_end - 1) lies on the diagonal row c;
             * earlier entries belong to the rows immediately above it.
             * Equivalent to the original r = c - col_eles + col_indx, but
             * without recomputing the loop-invariant column length and
             * without a separate running counter. */
            const ALPHA_INT r = c - (col_end - 1 - ai);

            ALPHA_Number t;
            alpha_mul_3c(t, alpha, A->values[ai]); /* t = alpha * conj(A[r][c]) */
            alpha_madde(y[r], t, x[c]);            /* y[r] += t * x[c] */
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
#else
    return ALPHA_SPARSE_STATUS_INVALID_VALUE;
#endif
}

/*
 * Public entry point: dispatches to the OpenMP-aware implementation.
 * Valid only for complex element types; real builds report an invalid value.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_SKY *A,
                           const ALPHA_Number *x,
                           const ALPHA_Number beta,
                           ALPHA_Number *y)
{
#ifdef COMPLEX
    return ONAME_omp(alpha, A, x, beta, y);
#else
    return ALPHA_SPARSE_STATUS_INVALID_VALUE;
#endif
}
ll_bfs_template.h
/* * ll_bfs_template.h * LLAMA Graph Analytics * * Copyright 2014 * The President and Fellows of Harvard College. * * Copyright 2014 * Oracle Labs. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * This file was adapted from Green-Marl, which includes the following notice: * * Copyright (c) 2011-2012 Stanford University, unless otherwise specified. * All rights reserved. * * This software was developed by the Pervasive Parallelism Laboratory of * Stanford University, California, USA. 
* * Permission to use, copy, modify, and distribute this software in source or * binary form for any purpose with or without fee is hereby granted, provided * that the following conditions are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of Stanford University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/


#ifndef LL_BFS_TEMPLATE_H
#define LL_BFS_TEMPLATE_H

#include <omp.h>
#include <string.h>
#include <map>
#include <set>
#include <unordered_set>
#include <unordered_map>

/**
 * Adaptive BFS driver over a LLAMA graph.
 *
 * The traversal switches between internal representations depending on the
 * frontier size:
 *   ST_SMALL - frontier kept in a std::map (tiny frontiers);
 *   ST_QUE   - explicit per-thread queues merged into a global vector;
 *   ST_Q2R   - transition level from queue-based to read-based;
 *   ST_RD    - no queue: every node is scanned and filtered by level;
 *   ST_R2Q   - transition level from read-based back to queue-based.
 *
 * Template parameters select multithreading, a user "navigator" filter,
 * reverse-edge traversal, and recording of BFS-tree ("down") edges.
 * Subclasses implement visit_fw/visit_rv/check_navigator.
 */
template<class Graph, typename level_t,
bool use_multithread, bool has_navigator, bool use_reverse_edge, bool save_child>
class ll_bfs_template
{

  public:

    ll_bfs_template(Graph& _G)
        : G(_G) {
        visited_bitmap = NULL; // bitmap
        visited_level = NULL;
        thread_local_next_level = NULL;
        down_edge_array = NULL;
        down_edge_set = NULL;
        down_edge_array_w = NULL;

        if (save_child) {
            down_edge_set = new std::unordered_set<edge_t>();
        }
    }

    virtual ~ll_bfs_template() {
        delete [] visited_bitmap;
        delete [] visited_level;
        delete [] thread_local_next_level;
        delete down_edge_set;

        // NOTE(review): down_edge_array_w aliases down_edge_array[num_levels-1]
        // (see prepare_que), so it must NOT be deleted separately here.
        if (down_edge_array != NULL) {
#ifndef FORCE_L0
            for (size_t i = 0; i < G.num_levels(); i++) delete[] down_edge_array[i];
#endif
            delete[] down_edge_array;
        }
    }

    // Convenience overload: use the OpenMP thread limit.
    void prepare(node_t root_node) {
        // TODO Is this correct? Do we need to poll a some sort of a runtime?
        prepare(root_node, omp_get_max_threads());
    }

    // Reset all traversal state for a new BFS from root_node.
    void prepare(node_t root_node, int max_num_thread) {
        int num_thread;
        if (use_multithread) {
            num_thread = max_num_thread;
        }
        else {
            num_thread = 1;
        }

        max_threads = num_thread;
        is_finished = false;
        curr_level = 0;
        root = root_node;
        state = ST_SMALL;
        assert(root != LL_NIL_NODE);

        if (save_child) {
            if (down_edge_set == NULL)
                down_edge_set = new std::unordered_set<edge_t>();
        }

        global_vector.clear();
        level_queue_begin.clear();
        level_count.clear();

        // create local queues
        if (thread_local_next_level == NULL) {
            thread_local_next_level = new std::vector<node_t>[num_thread];
            for (int i = 0; i < num_thread; i++)
                thread_local_next_level[i].reserve(THRESHOLD2);
        } else {
            for (int i = 0; i < num_thread; i++)
                thread_local_next_level[i].clear();
        }
    }

    // Run the forward BFS: expand level by level until the frontier is empty,
    // switching between the state-machine representations as it grows/shrinks.
    void do_bfs_forward() {

        //---------------------------------
        // prepare root node
        //---------------------------------
        curr_level = 0;
        curr_count = 0;
        next_count = 0;

        small_visited[root] = curr_level;
        curr_count++;
        global_vector.push_back(root);
        global_curr_level_begin = 0;
        global_next_level_begin = curr_count;

        level_count.push_back(curr_count);
        level_queue_begin.push_back(global_curr_level_begin);

        bool is_done = false;
        while (!is_done) {
            switch (state) {
                case ST_SMALL: {
                    // Tiny frontier: sequential, map-based visited set.
                    for (node_t i = 0; i < curr_count; i++) {
                        node_t t = global_vector[global_curr_level_begin + i];
                        iterate_neighbor_small(t);
                        visit_fw(t);            // visit after iteration. in that way, one can check down-neighbors quite easily
                    }
                    break;
                }
                case ST_QUE: {
                    // Queue-based frontier; bitmap-based visited set.
                    if (use_multithread)  // do it in parallel
                    {
                        int num_threads = std::min((node_t) max_threads, curr_count/128+1);
                        #pragma omp parallel num_threads(num_threads)
                        {
                            int tid = omp_get_thread_num();
                            #pragma omp for nowait
                            for (node_t i = 0; i < curr_count; i++) {
                                node_t t = global_vector[global_curr_level_begin + i];
                                iterate_neighbor_que(t, tid);
                                visit_fw(t);
                            }
                            finish_thread_que(tid);
                        }
                    }
                    else { // do it in sequential
                        int tid = 0;
                        for (node_t i = 0; i < curr_count; i++) {
                            //node_t t = global_curr_level[i];
                            node_t t = global_vector[global_curr_level_begin + i];
                            iterate_neighbor_que(t, tid);
                            visit_fw(t);
                        }
                        finish_thread_que(tid);
                    }
                    break;
                }
                case ST_Q2R: {
                    // Transition: read this level from the queue, but only
                    // count (do not enqueue) the next level.
                    if (use_multithread) {  // do it in parallel
                        int num_threads = std::min((node_t) max_threads, curr_count/128+1);
                        #pragma omp parallel num_threads(num_threads)
                        {
                            node_t local_cnt = 0;
                            #pragma omp for nowait
                            for (node_t i = 0; i < curr_count; i++) {
                                node_t t = global_vector[global_curr_level_begin + i];
                                iterate_neighbor_rd(t, local_cnt);
                                visit_fw(t);
                            }
                            finish_thread_rd(local_cnt);
                        }
                    }
                    else { // do it sequentially
                        node_t local_cnt = 0;
                        for (node_t i = 0; i < curr_count; i++) {
                            //node_t t = global_curr_level[i];
                            node_t t = global_vector[global_curr_level_begin + i];
                            iterate_neighbor_rd(t, local_cnt);
                            visit_fw(t);
                        }
                        finish_thread_rd(local_cnt);
                    }
                    break;
                }
                case ST_RD: {
                    // Read-based: scan all nodes, filter by level.
                    if (use_multithread) { // do it in parallel
                        #pragma omp parallel
                        {
                            node_t local_cnt = 0;
                            #pragma omp for nowait schedule(dynamic,128)
                            for (node_t t = 0; t < G.max_nodes(); t++) {
                                if (visited_level[t] == curr_level) {
                                    iterate_neighbor_rd(t, local_cnt);
                                    visit_fw(t);
                                }
                            }
                            finish_thread_rd(local_cnt);
                        }
                    }
                    else { // do it in sequential
                        node_t local_cnt = 0;
                        for (node_t t = 0; t < G.max_nodes(); t++) {
                            if (visited_level[t] == curr_level) {
                                iterate_neighbor_rd(t, local_cnt);
                                visit_fw(t);
                            }
                        }
                        finish_thread_rd(local_cnt);
                    }
                    break;
                }
                case ST_R2Q: {
                    // Transition: scan all nodes, but enqueue the next level.
                    if (use_multithread) { // do it in parallel
                        #pragma omp parallel
                        {
                            int tid = omp_get_thread_num();
                            #pragma omp for nowait schedule(dynamic,128)
                            for (node_t t = 0; t < G.max_nodes(); t++) {
                                if (visited_level[t] == curr_level) {
                                    iterate_neighbor_que(t, tid);
                                    visit_fw(t);
                                }
                            }
                            finish_thread_que(tid);
                        }
                    }
                    else {
                        int tid = 0;
                        for (node_t t = 0; t < G.max_nodes(); t++) {
                            if (visited_level[t] == curr_level) {
                                iterate_neighbor_que(t, tid);
                                visit_fw(t);
                            }
                        }
                        finish_thread_que(tid);
                    }
                    break;
                }
            } // end of switch

            do_end_of_level_fw();
            is_done = get_next_state();

        } // end of while
    }

    // Walk the levels backwards (deepest first), calling visit_rv on every
    // node of each level. Must be called only after do_bfs_forward finished.
    void do_bfs_reverse() {
        // This function should be called only after do_bfs_forward has finished.

        // assumption: small-world graph
        level_t& level = curr_level;
        while (true) {
            node_t count = level_count[level];
            //node_t* queue_ptr = level_start_ptr[level];
            node_t* queue_ptr;
            node_t begin_idx = level_queue_begin[level];
            if (begin_idx == -1) {
                // level was produced in a read-based state: no queue exists
                queue_ptr = NULL;
            } else {
                queue_ptr = & (global_vector[begin_idx]);
            }
            if (queue_ptr == NULL) {
                #pragma omp parallel if (use_multithread)
                {
                    #pragma omp for nowait schedule(dynamic,128)
                    for (node_t i = 0; i < G.max_nodes(); i++) {
                        if (visited_level[i] != curr_level) continue;
                        visit_rv(i);
                    }
                }
            }
            else {
                // NOTE(review): thread sizing uses curr_count although this
                // level iterates `count` nodes — confirm this is intentional.
                int num_threads = std::min((node_t) max_threads, curr_count/128+1);
                #pragma omp parallel num_threads(num_threads) if (use_multithread)
                {
                    #pragma omp for nowait
                    for (node_t i = 0; i < count; i++) {
                        node_t u = queue_ptr[i];
                        visit_rv(u);
                    }
                }
            }

            do_end_of_level_rv();
            if (level == 0) break;
            level--;
        }
    }

    // True iff edge idx was recorded as a BFS-tree ("down") edge.
    // Valid only when save_child is enabled.
    bool is_down_edge(edge_t idx) {
        if (state == ST_SMALL)
            return (down_edge_set->find(idx) != down_edge_set->end());
        else {
#ifdef FORCE_L0
            return down_edge_array[idx];
#else
            size_t level = LL_EDGE_LEVEL(idx);
            if (level == LL_WRITABLE_LEVEL) {
                return down_edge_array_w[LL_EDGE_GET_WRITABLE(idx)->we_numerical_id];
            }
            return down_edge_array[level][LL_EDGE_INDEX(idx)];
#endif
        }
    }

  protected:

    // Hooks implemented by the concrete traversal.
    virtual void visit_fw(node_t t)=0;
    virtual void visit_rv(node_t t)=0;
    virtual bool check_navigator(node_t t, edge_t nx)=0;
    virtual void do_end_of_level_fw() { }
    virtual void do_end_of_level_rv() { }

    node_t get_root() {
        return root;
    }

    // Level at which t was first reached, or __INVALID_LEVEL if unvisited.
    level_t get_level(node_t t) {
        // GCC expansion
        if (__builtin_expect((state == ST_SMALL), 0)) {
            if (small_visited.find(t) == small_visited.end())
                return __INVALID_LEVEL;
            else
                return small_visited[t];
        } else {
            return visited_level[t];
        }
    }

    level_t get_curr_level() {
        return curr_level;
    }

  private:

    // Decide the representation for the next level; returns true when the
    // BFS is finished (empty next frontier).
    bool get_next_state() {
        //const char* state_name[5] = {"SMALL","QUEUE","Q2R","RD","R2Q"};

        if (next_count == 0) return true;  // BFS is finished

        int next_state = state;

        switch (state) {
            case ST_SMALL:
                if (next_count >= THRESHOLD1) {
                    prepare_que();
                    next_state = ST_QUE;
                }
                break;
            case ST_QUE:
                // switch to read-based only when the frontier grows rapidly
                if ((next_count >= THRESHOLD2) && (next_count >= curr_count*5)) {
                    prepare_read();
                    next_state = ST_Q2R;
                }
                break;
            case ST_Q2R:
                next_state = ST_RD;
                break;
            case ST_RD:
                // shrinking frontier: go back to queue-based
                if (next_count <= (2 * curr_count)) {
                    next_state = ST_R2Q;
                }
                break;
            case ST_R2Q:
                next_state = ST_QUE;
                break;
        }

        finish_level(state);
        state = next_state;

        return false;
    }

    // Commit the just-expanded level: roll the next queue window over the
    // current one and record per-level bookkeeping for do_bfs_reverse().
    void finish_level(int state) {
        if ((state == ST_RD) || (state == ST_Q2R)) {
            // output queue is not valid
        }
        else {
            // move output queue
            //node_t* temp = &(global_next_level[next_count]);
            //global_curr_level = global_next_level;
            //global_next_level = temp;
            global_curr_level_begin = global_next_level_begin;
            global_next_level_begin = global_next_level_begin + next_count;
        }

        curr_count = next_count;
        next_count = 0;
        curr_level++;

        // save 'new current' level status
        level_count.push_back(curr_count);
        if ((state == ST_RD) || (state == ST_Q2R)) {
            //level_start_ptr.push_back(NULL);
            level_queue_begin.push_back(-1);
        }
        else {
            //level_start_ptr.push_back(global_curr_level);
            level_queue_begin.push_back(global_curr_level_begin);
        }
    }

    // Edge-iteration helpers: pick forward or reverse edges per template arg.
    void iter_begin(ll_edge_iterator& iter, node_t v) {
        if (use_reverse_edge) {
            G.in_iter_begin_fast(iter, v);
        } else {
            G.out_iter_begin(iter, v);
        }
    }

    edge_t iter_next(ll_edge_iterator& iter) {
        if (use_reverse_edge) {
            return G.in_iter_next_fast(iter);
        } else {
            return G.out_iter_next(iter);
        }
    }

    node_t get_node(ll_edge_iterator& iter) {
        return iter.last_node;
    }

    // Expand one node in ST_SMALL: visited set is the small_visited map.
    void iterate_neighbor_small(node_t t) {
        ll_edge_iterator iter;
        iter_begin(iter, t);
        for (edge_t nx = iter_next(iter); nx != LL_NIL_EDGE; nx = iter_next(iter)) {

            node_t u = get_node(iter);

            // check visited
            if (small_visited.find(u) == small_visited.end()) {
                if (has_navigator) {
                    if (check_navigator(u, nx) == false) continue;
                }

                if (save_child) {
                    save_down_edge_small(nx);
                }

                small_visited[u] = curr_level + 1;
                //global_next_level[next_count++] = u;
                global_vector.push_back(u);
                next_count++;
            }
            else if (save_child) {
                if (has_navigator) {
                    if (check_navigator(u, nx) == false) continue;
                }

                // also record cross edges into the (already discovered)
                // next level as down edges
                if (small_visited[u] == (curr_level+1)){
                    save_down_edge_small(nx);
                }
            }
        }
    }

    // should be used only when save_child is enabled
    void save_down_edge_small(edge_t idx) {
        down_edge_set->insert(idx);
    }

    void save_down_edge_large(edge_t idx) {
#ifdef FORCE_L0
        down_edge_array[idx] = 1;
#else
        size_t level = LL_EDGE_LEVEL(idx);
        if (level == LL_WRITABLE_LEVEL) {
            down_edge_array_w[LL_EDGE_GET_WRITABLE(idx)->we_numerical_id] = 1;
        }
        // NOTE(review): unlike is_down_edge(), there is no early return above,
        // so for LL_WRITABLE_LEVEL this also indexes
        // down_edge_array[LL_WRITABLE_LEVEL] — verify this cannot go out of
        // bounds for the sentinel level value.
        down_edge_array[LL_EDGE_LEVEL(idx)][LL_EDGE_INDEX(idx)] = 1;
#endif
    }

    // Switch from the map-based small representation to the bitmap/queue
    // representation: allocate the bitmap, level array and (optionally) the
    // down-edge arrays, then migrate the already-collected state.
    void prepare_que() {

        global_vector.reserve(G.max_nodes());

        // create bitmap and edges
        if (visited_bitmap == NULL) {
            visited_bitmap = new unsigned char[(G.max_nodes() + 7) / 8];
            visited_level = new level_t[G.max_nodes()];
        }
        if (save_child) {
            if (down_edge_array == NULL) {
#ifdef FORCE_L0
                down_edge_array = new unsigned char [G.max_edges(0)];
#else
                down_edge_array = new unsigned char* [G.num_levels()];
                for (size_t i = 0; i < G.num_levels(); i++)
                    down_edge_array[i] = new unsigned char [G.max_edges(i)];

                // Note: This makes sense only if the current graph is writable,
                // but fortunately it is never accessed unless we are on the
                // writable level
                down_edge_array_w = down_edge_array[G.num_levels() - 1];
#endif
            }
        }

        if (use_multithread) {
            #pragma omp parallel
            {
                #pragma omp for nowait
                for (node_t i = 0; i < (G.max_nodes() + 7) / 8; i++)
                    visited_bitmap[i] = 0;

                #pragma omp for nowait
                for (node_t i = 0; i < G.max_nodes(); i++)
                    visited_level[i] = __INVALID_LEVEL;

                if (save_child) {
#ifdef FORCE_L0
                    #pragma omp for nowait
                    for (edge_t i = 0; i < G.max_edges(0); i++)
                        down_edge_array[i] = 0;
#else
                    #pragma omp for nowait
                    for (size_t i = 0; i < G.num_levels(); i++)
                        memset(down_edge_array[i], 0,
                               sizeof(unsigned char) * G.max_edges(i));
#endif
                }
            }
        } else {
            for (node_t i = 0; i < (G.max_nodes() + 7) / 8; i++)
                visited_bitmap[i] = 0;
            for (node_t i = 0; i < G.max_nodes(); i++)
                visited_level[i] = __INVALID_LEVEL;
            if (save_child) {
#ifdef FORCE_L0
                for (edge_t i = 0; i < G.max_edges(0); i++)
                    down_edge_array[i] = 0;
#else
                for (size_t i = 0; i < G.num_levels(); i++)
                    memset(down_edge_array[i], 0,
                           sizeof(unsigned char) * G.max_edges(i));
#endif
            }
        }

        // migrate the visited map into bitmap + level array
        //typename std::unordered_map<node_t, level_t>::iterator II;
        typename std::map<node_t, level_t>::iterator II;
        for (II = small_visited.begin(); II != small_visited.end(); II++) {
            node_t u = II->first;
            level_t lev = II->second;
            _ll_set_bit(visited_bitmap, u);
            visited_level[u] = lev;
        }

        // migrate the down-edge set into the per-level arrays
        if (save_child) {
            typename std::unordered_set<edge_t>::iterator J;
            for (J = down_edge_set->begin(); J != down_edge_set->end(); J++) {
                edge_t idx = *J;
#ifdef FORCE_L0
                down_edge_array[idx] = 1;
#else
                size_t level = LL_EDGE_LEVEL(idx);
                if (level == LL_WRITABLE_LEVEL) {
                    down_edge_array_w[LL_EDGE_GET_WRITABLE(idx)->we_numerical_id] = 1;
                }
                down_edge_array[level][LL_EDGE_INDEX(idx)] = 1;
#endif
            }
        }
    }

    // Expand one node in the queue-based states; discovered nodes go into
    // the calling thread's local queue.
    void iterate_neighbor_que(node_t t, int tid) {
        ll_edge_iterator iter;
        iter_begin(iter, t);
        for (edge_t nx = iter_next(iter); nx != LL_NIL_EDGE; nx = iter_next(iter)) {

            node_t u = get_node(iter);
            assert(u >= 0 && u < G.max_nodes());

            // check visited bitmap
            // test & test& set
            if (_ll_get_bit(visited_bitmap, u) == 0) {
                if (has_navigator) {
                    if (check_navigator(u, nx) == false) continue;
                }

                bool re_check_result;
                if (use_multithread) {
                    re_check_result = _ll_set_bit_atomic(visited_bitmap, u);
                } else {
                    re_check_result = true;
                    _ll_set_bit(visited_bitmap, u);
                }

                if (save_child) {
                    save_down_edge_large(nx);
                }

                if (re_check_result) {
                    // add to local q
                    thread_local_next_level[tid].push_back(u);
                    visited_level[u] = (curr_level + 1);
                }
            }
            else if (save_child) {
                if (has_navigator) {
                    if (check_navigator(u, nx) == false) continue;
                }

                if (visited_level[u] == (curr_level +1)) {
                    save_down_edge_large(nx);
                }
            }
        }
    }

    // Flush one thread's local queue into the shared next-level window.
    void finish_thread_que(int tid) {
        node_t local_cnt = thread_local_next_level[tid].size();
        //copy curr_cnt to next_cnt
        if (local_cnt > 0) {
            node_t old_idx = __sync_fetch_and_add(&next_count, local_cnt);
            // copy to global vector
            // NOTE(review): this memcpy writes into global_vector's reserved
            // capacity beyond its size() (only reserve() was called in
            // prepare_que) — confirm this is the intended contract.
            memcpy(&(global_vector[global_next_level_begin + old_idx]),
                   &(thread_local_next_level[tid][0]),
                   local_cnt * sizeof(node_t));
        }
        thread_local_next_level[tid].clear();
    }

    void prepare_read() {
        // nothing to do
    }

    // Expand one node in the read-based states; only counts discovered nodes.
    void iterate_neighbor_rd(node_t t, node_t& local_cnt) {
        ll_edge_iterator iter;
        iter_begin(iter, t);
        for (edge_t nx = iter_next(iter); nx != LL_NIL_EDGE; nx = iter_next(iter)) {

            node_t u = get_node(iter);

            // check visited bitmap
            // test & test& set
            if (_ll_get_bit(visited_bitmap, u) == 0) {
                if (has_navigator) {
                    if (check_navigator(u, nx) == false) continue;
                }

                bool re_check_result;
                if (use_multithread) {
                    re_check_result = _ll_set_bit_atomic(visited_bitmap, u);
                } else {
                    re_check_result = true;
                    _ll_set_bit(visited_bitmap, u);
                }

                if (save_child) {
                    save_down_edge_large(nx);
                }

                if (re_check_result) {
                    // add to local q
                    visited_level[u] = curr_level + 1;
                    local_cnt++;
                }
            }
            else if (save_child) {
                if (has_navigator) {
                    if (check_navigator(u, nx) == false) continue;
                }

                if (visited_level[u] == (curr_level +1)) {
                    save_down_edge_large(nx);
                }
            }
        }
    }

    void finish_thread_rd(node_t local_cnt) {
        __sync_fetch_and_add(&next_count, local_cnt);
    }


    //-----------------------------------------------------
    //-----------------------------------------------------
    static const int ST_SMALL = 0;
    static const int ST_QUE = 1;
    static const int ST_Q2R = 2;
    static const int ST_RD = 3;
    static const int ST_R2Q = 4;
    static const int THRESHOLD1 = 128;  // single threaded
    static const int THRESHOLD2 = 1024; // move to RD-based

    // not -1.
    //(why? because curr_level-1 might be -1, when curr_level = 0)
    static const level_t __INVALID_LEVEL = -2;

    int state;

    unsigned char* visited_bitmap; // bitmap
    level_t* visited_level; // assumption: small_world graph
    bool is_finished;
    level_t curr_level;
    node_t root;
    Graph& G;
    node_t curr_count;
    node_t next_count;

    //std::unordered_map<node_t, level_t> small_visited;
    std::map<node_t, level_t> small_visited;
    std::unordered_set<edge_t>* down_edge_set;
    unsigned char* down_edge_array_w;
#ifdef FORCE_L0
    unsigned char* down_edge_array;
#else
    unsigned char** down_edge_array;
#endif

    //node_t* global_next_level;
    //node_t* global_curr_level;
    //node_t* global_queue;
    std::vector<node_t> global_vector;
    node_t global_curr_level_begin;
    node_t global_next_level_begin;

    //std::vector<node_t*> level_start_ptr;
    std::vector<node_t> level_queue_begin;
    std::vector<node_t> level_count;

    std::vector<node_t>* thread_local_next_level;
    int max_threads;
};

#endif
parallel_macros.h
// ========================================================================== // SeqAn - The Library for Sequence Analysis // ========================================================================== // Copyright (c) 2006-2013, Knut Reinert, FU Berlin // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of Knut Reinert or the FU Berlin nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY // OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
// // ========================================================================== // Author: Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de> // ========================================================================== // Utility macros for parallelism. // ========================================================================== #ifndef SEQAN_PARALLEL_PARALLEL_MACROS_H_ #define SEQAN_PARALLEL_PARALLEL_MACROS_H_ /*! * @macro SEQAN_OMP_PRAGMA * @brief Portable conditional <tt>#pragma</tt> issuing if OpenMP is enabled. * * @signature SEQAN_OMP_PRAGMA(x) * * @param x The string to issue behind <tt>#pragma omp</tt>. * * @section Remarks * * This macro uses portable pragma generation, dependent on the macro <tt>_OPENMP</tt> being defined (as by * the OpenMP standard). * * This is useful for disabling OpenMP pragmas on compilers that do not support OpenMP or when OpenMP is not enabled to * suppress warnings. * * @section Example * * Parallelize loop with OpenMP if OpenMP is enabled: * * @code{.cpp} * SEQAN_OMP_PRAGMA(parallel for) // becomes: #pragma omp parallel for * for (int i = 0; i < x; ++i) * { * // Do work. * } * @endcode * * Make an addition atomic if OpenMP is enabled: * * @code{.cpp} * SEQAN_OMP_PRAGMA(parallel atomic) // becomes: #pragma omp parallel atomic * i += 1; * @endcode */ /** .Macro.SEQAN_OMP_PRAGMA ..summary:Portable conditional $#pragma$ issuing if OpenMP is enabled. ..cat:Parallelism ..signature:SEQAN_OMP_PRAGMA(x) ..param.x:The string to issue behind $#pragma omp$. ..remarks:This macro uses portable pragma generation, dependent on the macro $_OPENMP$ being defined (as by the OpenMP standard). ..remarks:This is useful for disabling OpenMP pragmas on compilers that do not support OpenMP to suppress warnings. ..example.text:Parallelize loop with OpenMP if OpenMP is enabled: ..example.code: SEQAN_OMP_PRAGMA(parallel for) // becomes: #pragma omp parallel for for (int i = 0; i < x; ++i) { // Do work. 
}
..example.text:Make an addition atomic if OpenMP is enabled:
..example.code:
SEQAN_OMP_PRAGMA(parallel atomic) // becomes: #pragma omp parallel atomic
i += 1;
 */

#ifdef _OPENMP

#include <omp.h>

#if defined(PLATFORM_WINDOWS_MINGW) || defined(PLATFORM_GCC)
// GCC _Pragma operator: #x stringizes the macro argument so the whole
// "omp x" directive can be emitted from inside a macro expansion.
#define SEQAN_DO_PRAGMA(x) _Pragma(#x)
#define SEQAN_OMP_PRAGMA(x) SEQAN_DO_PRAGMA(omp x)
#else  // #if defined(PLATFORM_WINDOWS_MINGW) || defined(PLATFORM_GCC)
// MSVC __pragma-operator (MSVC's non-standard equivalent of _Pragma).
#define SEQAN_OMP_PRAGMA(x) __pragma (omp x)
#endif  // #if defined(PLATFORM_WINDOWS_MINGW) || defined(PLATFORM_GCC)

#else  // #ifdef _OPENMP

// OpenMP disabled: every SEQAN_OMP_PRAGMA(...) expands to nothing.
#define SEQAN_OMP_PRAGMA(x)

// low-level OpenMP runtime compatibility
// Single-threaded stubs so code that calls the omp_* runtime API still
// compiles when OpenMP is off: thread count and max threads report 1,
// the current thread id is always 0, and setting the thread count is a
// no-op.  (The unnamed int parameter is valid C++; this is a C++ header.)
inline void omp_set_num_threads(int) {}
inline int omp_get_num_threads() { return 1; }
inline int omp_get_max_threads() { return 1; }
inline int omp_get_thread_num() { return 0; }

#endif  // #ifdef _OPENMP

#endif  // SEQAN_PARALLEL_PARALLEL_MACROS_H_
GB_binop__rdiv_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rdiv_int32) // A.*B function (eWiseMult): GB (_AemultB_08__rdiv_int32) // A.*B function (eWiseMult): GB (_AemultB_02__rdiv_int32) // A.*B function (eWiseMult): GB (_AemultB_04__rdiv_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_int32) // A*D function (colscale): GB (_AxD__rdiv_int32) // D*A function (rowscale): GB (_DxB__rdiv_int32) // C+=B function (dense accum): GB (_Cdense_accumB__rdiv_int32) // C+=b function (dense accum): GB (_Cdense_accumb__rdiv_int32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_int32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_int32) // C=scalar+B GB (_bind1st__rdiv_int32) // C=scalar+B' GB (_bind1st_tran__rdiv_int32) // C=A+scalar GB (_bind2nd__rdiv_int32) // C=A'+scalar GB (_bind2nd_tran__rdiv_int32) // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 32) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 
// true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IDIV_SIGNED (y, x, 32) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RDIV || GxB_NO_INT32 || GxB_NO_RDIV_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// Each kernel below selects a type-specialized template via #include; the
// actual loops live in the included GB_*_template.c files, which consume the
// GB_* macros defined above.  When GB_DISABLE is true at compile time the
// kernel returns GrB_NO_VALUE so the caller falls back to the generic path.

void GB (_Cdense_ewise3_accum__rdiv_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__rdiv_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__rdiv_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return; harmless generator artifact.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__rdiv_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__rdiv_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__rdiv_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces are declared here and released by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__rdiv_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__rdiv_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for RDIV (the flip was resolved by rewriting
    // div(y,x) as rdiv(x,y)), so only the unflipped branch is compiled.
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__rdiv_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__rdiv_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__rdiv_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t   x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // Bb is the bitmap of B; skip entries not present
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        // RDIV bound 1st: cij = bij / x
        Cx [p] = GB_IDIV_SIGNED (bij, x, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__rdiv_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t   y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        // RDIV bound 2nd: cij = y / aij
        Cx [p] = GB_IDIV_SIGNED (y, aij, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_IDIV_SIGNED (aij, x, 32) ;     \
}

GrB_Info GB (_bind1st_tran__rdiv_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for any code after this point
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_IDIV_SIGNED (y, aij, 32) ;     \
}

GrB_Info GB (_bind2nd_tran__rdiv_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__minv_int16_int32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_int16_int32
// op(A') function:  GB_tran__minv_int16_int32

// C type:   int16_t
// A type:   int32_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = GB_IMINV_SIGNED (aij, 16)

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: signed integer multiplicative inverse (16-bit result)
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 16) ;

// casting: int32_t input is narrowed to int16_t before the op is applied
#define GB_CASTING(z, x) \
    int16_t z = (int16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT16 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise, embarrassingly parallel: entry p of C depends only on entry p
// of A, so a static OpenMP schedule is used.
GrB_Info GB_unop__minv_int16_int32
(
    int16_t *restrict Cx,
    const int32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_int16_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose loop itself lives in the included template
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
graph.h
#pragma once #include "util/timer.h" #include <sys/time.h> #include <sys/mman.h> #include <fcntl.h> #include <unordered_map> #include <vector> #include <chrono> #include <fstream> #include "util/util.h" #include "util/log/log.h" using namespace std; typedef int vid_t; //typedef unsigned int eid_t; typedef size_t eid_t; typedef struct { long n; long m; vid_t *adj; eid_t *num_edges; eid_t *eid; } graph_t; //Define an Edge data type struct Edge { vid_t u; vid_t v; Edge() { this->u = 0; this->v = 0; } Edge(vid_t u, vid_t v) { this->u = u; this->v = v; } }; void free_graph(graph_t *g); void getEidAndEdgeList(graph_t *g, Edge *idToEdge); template<typename T> struct Graph { string dir; uint32_t nodemax; T edgemax; // csr representation T *node_off; int *edge_dst; vector<int> degree; explicit Graph(char *dir_cstr); public: void ReadDegree(); void CheckInputGraph(); void ReadAdjacencyList(); }; template<typename T> Graph<T>::Graph(char *dir_cstr) { dir = string(dir_cstr); // clear the 4 bytes edgemax = 0; ReadDegree(); ReadAdjacencyList(); CheckInputGraph(); } using namespace std::chrono; template<typename T> void Graph<T>::ReadDegree() { auto start = high_resolution_clock::now(); ifstream deg_file(dir + string("/b_degree.bin"), ios::binary); int int_size = 0; deg_file.read(reinterpret_cast<char *>(&int_size), 4); if (int_size != sizeof(T)) { log_warn("int_size != sizeof(T), %d, %d", int_size, sizeof(T)); } deg_file.read(reinterpret_cast<char *>(&nodemax), sizeof(int)); deg_file.read(reinterpret_cast<char *>(&edgemax), int_size); log_info("int size: %d, n: %s, m: %s", int_size, FormatWithCommas(nodemax).c_str(), FormatWithCommas(edgemax).c_str()); degree.resize(static_cast<unsigned long>(nodemax)); deg_file.read(reinterpret_cast<char *>(&degree.front()), sizeof(int) * nodemax); auto end = high_resolution_clock::now(); log_info("read degree file time: %.3lf s", duration_cast<milliseconds>(end - start).count() / 1000.0); } template<typename T> void 
Graph<T>::ReadAdjacencyList() {
    auto start = high_resolution_clock::now();
    // NOTE(review): adj_file is opened but never read; the data is actually
    // loaded via mmap below — presumably leftover code, confirm before removal.
    ifstream adj_file(dir + string("/b_adj.bin"), ios::binary);

    // csr representation
    node_off = (T *) malloc(sizeof(T) * (nodemax + 1));
    // +16 slack entries at the end of edge_dst (padding for readers that
    // may scan slightly past the last edge)
    edge_dst = static_cast<int *>(malloc(sizeof(int) * static_cast<uint64_t>(edgemax + 16)));
    string dst_v_file_name = dir + string("/b_adj.bin");
    // NOTE(review): neither the fd nor the mmap result is checked for
    // failure; a missing b_adj.bin would crash in the copy loop below.
    auto dst_v_fd = open(dst_v_file_name.c_str(), O_RDONLY, S_IRUSR | S_IWUSR);
    int *buffer = (int *) mmap(0, static_cast<uint64_t >(edgemax) * 4u, PROT_READ, MAP_PRIVATE, dst_v_fd, 0);

    // prefix sum: node_off[i+1] = sum of degrees of vertices 0..i
    node_off[0] = 0;
    for (auto i = 0u; i < nodemax; i++) {
        node_off[i + 1] = node_off[i] + degree[i];
    }
    auto end = high_resolution_clock::now();
    log_info("malloc, and sequential-scan time: %.3lf s",
             duration_cast<milliseconds>(end - start).count() / 1000.0);
    // load dst vertices into the array
#pragma omp parallel for schedule(dynamic, 1000)
    for (auto i = 0u; i < nodemax; i++) {
        // copy to the high memory bandwidth mem
        for (uint64_t offset = node_off[i]; offset < node_off[i + 1]; offset++) {
            edge_dst[offset] = buffer[offset];
        }
        // inclusive
        // NOTE(review): after loading, degree[i] is one larger than the true
        // degree ("inclusive" of the vertex itself?) — downstream consumers
        // appear to rely on this; confirm before changing.
        degree[i]++;
    }
    munmap(buffer, static_cast<uint64_t >(edgemax) * 4u);
#ifdef VERIFY_INPUT
    // Verify: for every edge (u,v), check the reverse edge (v,u) exists,
    // i.e. the CSR is a valid undirected graph.
#pragma omp parallel for schedule(dynamic, 1000)
    for (auto u = 0u; u < nodemax; u++) {
        for (size_t offset = node_off[u]; offset < node_off[u + 1]; offset++) {
            auto v = edge_dst[offset];
            if (BranchFreeBinarySearch(edge_dst, node_off[v], node_off[v + 1], (int) u) == node_off[v + 1]) {
                log_fatal("CSR not correct...");
                exit(-1);
            }
        }
    }
    log_info("CSR verify pass");
#endif
    auto end2 = high_resolution_clock::now();
    log_info("read adjacency list file time: %.3lf s", duration_cast<milliseconds>(end2 - end).count() / 1000.0);
}

// Sanity-check the loaded CSR: rejects self loops and requires each
// adjacency list to be sorted in strictly increasing id order.
// Calls exit() on the first violation found.
template<typename T>
void Graph<T>::CheckInputGraph() {
    auto start = high_resolution_clock::now();
#pragma omp parallel for schedule(dynamic, 5000)
    for (auto i = 0u; i < nodemax; i++) {
        for (auto j = node_off[i]; j < node_off[i + 1]; j++) {
            if (edge_dst[j] == static_cast<int>(i)) {
                log_error("Self loop of v: %d", i);
                exit(1);
            }
            if (j > node_off[i] && edge_dst[j] <= edge_dst[j - 1]) {
                log_error("Edges not sorted in increasing id order!\nThe program may not run properly!");
                exit(1);
            }
        }
    }
    auto end = high_resolution_clock::now();
    log_info("check input graph file time: %.3lf s", duration_cast<milliseconds>(end - start).count() / 1000.0);
}

double timer();
partial.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_parcsr_ls.h" #include "aux_interp.h" /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildPartialExtPIInterp * Comment: *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildPartialExtPIInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_BigInt *num_old_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, hypre_ParCSRMatrix **P_ptr) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PARTIAL_INTERP] -= hypre_MPI_Wtime(); #endif /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_n = col_1 + 
(HYPRE_BigInt)local_numrows; HYPRE_BigInt total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /*HYPRE_Int *col_map_offd_P = NULL;*/ HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; /*HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL;*/ HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; /* Full row information for columns of A that are off diag*/ hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data; HYPRE_Int *A_ext_i; HYPRE_BigInt *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_Int *old_coarse_to_fine = NULL; HYPRE_Int full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_BigInt *Sop_j; HYPRE_Int sgn; /* Variables to keep count of interpolatory points */ /*HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; HYPRE_Int jj_begin_row_offd = 0; HYPRE_Int jj_end_row_offd = 0; HYPRE_Int coarse_counter, coarse_counter_offd; */ HYPRE_Int n_coarse_old; HYPRE_BigInt total_old_global_cpts; /* Interpolation weight variables */ HYPRE_Real sum, diagonal, distribute; /*HYPRE_Int strong_f_marker = -2;*/ /* Loop variables */ /*HYPRE_Int index;*/ HYPRE_Int cnt, old_cnt; HYPRE_Int start_indexing = 0; HYPRE_Int i; /*HYPRE_Int i, ii, i1, i2, j, jj, kk, k1, jj1;*/ /* Definitions */ HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; HYPRE_Real wall_time; HYPRE_Int max_num_threads; HYPRE_Int 
*P_diag_array = NULL; HYPRE_Int *P_offd_array = NULL; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; if (debug_flag==4) wall_time = time_getWallclockSeconds(); /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); max_num_threads = hypre_NumThreads(); my_first_cpt = num_cpts_global[0]; /*my_first_old_cpt = num_old_cpts_global[0];*/ n_coarse_old = (HYPRE_Int)(num_old_cpts_global[1] - num_old_cpts_global[0]); /*n_coarse = num_cpts_global[1] - num_cpts_global[0];*/ if (my_id == (num_procs -1)) { total_global_cpts = num_cpts_global[1]; total_old_global_cpts = num_old_cpts_global[1]; } hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); hypre_MPI_Bcast(&total_old_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off processor information (specifically for neighbors of * neighbors */ full_off_procNodes = 0; if (num_procs > 1) { if (hypre_exchange_interp_data( &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg, A, CF_marker, S, num_functions, dof_func, 1)) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime(); #endif return hypre_error_flag; } A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixBigJ(Sop); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
*-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST); if (n_fine) { old_coarse_to_fine = hypre_CTAlloc(HYPRE_Int, n_coarse_old, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); /*P_marker = hypre_CTAlloc(HYPRE_Int, n_fine); */ } if (full_off_procNodes) { /*P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);*/ fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); } /*hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd);*/ for (i=0; i < full_off_procNodes; i++) { fine_to_coarse_offd[i] = -1; tmp_CF_marker_offd[i] = -1; } cnt = 0; old_cnt = 0; for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; if (CF_marker[i] == 1) { fine_to_coarse[i] = cnt++; old_coarse_to_fine[old_cnt++] = i; } else if (CF_marker[i] == -2) { old_coarse_to_fine[old_cnt++] = i; } } P_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads+1, HYPRE_MEMORY_HOST); P_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads+1, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Loop over fine grid. 
*-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i, diagonal, distribute, sgn, sum) #endif { HYPRE_Int ii, jj_counter, jj_counter_offd, jj, kk, i1, i2, k1, jj1; HYPRE_BigInt big_k1; HYPRE_Int loc_col, jj_begin_row, jj_begin_row_offd; HYPRE_Int jj_end_row, jj_end_row_offd, strong_f_marker; HYPRE_Int size, rest, ne, ns; HYPRE_Int num_threads, my_thread_num; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; strong_f_marker = -2; num_threads = hypre_NumActiveThreads(); my_thread_num = hypre_GetThreadNum(); size = n_coarse_old/num_threads; rest = n_coarse_old - size*num_threads; if (my_thread_num < rest) { ns = my_thread_num*(size+1); ne = (my_thread_num+1)*(size+1); } else { ns = my_thread_num*size+rest; ne = (my_thread_num+1)*size+rest; } if (n_fine) P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); for (ii=0; ii < n_fine; ii++) P_marker[ii] = -1; if (full_off_procNodes) P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); for (ii=0; ii < full_off_procNodes; ii++) P_marker_offd[ii] = -1; /*coarse_counter = 0; coarse_counter_offd = 0;*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; for (ii = ns; ii < ne; ii++) { jj_begin_row = jj_counter; jj_begin_row_offd = jj_counter_offd; /*P_diag_i[ii] = jj_counter; if (num_procs > 1) P_offd_i[ii] = jj_counter_offd;*/ i = old_coarse_to_fine[ii]; if (CF_marker[i] > 0) { jj_counter++; /*coarse_counter++;*/ } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that stronly influence F-points * that strongly influence i. 
*--------------------------------------------------------------------*/ else if (CF_marker[i] == -2) { for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] > 0) { /* i1 is a C point */ if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; jj_counter++; } } else if (CF_marker[i1] != -3) { /* i1 is a F point, loop through it's strong neighbors */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] > 0) { if(P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; jj_counter++; } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { k1 = S_offd_j[kk]; if (CF_marker_offd[k1] > 0) { if(P_marker_offd[k1] < jj_begin_row_offd) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; jj_counter_offd++; } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (CF_marker_offd[i1] > 0) { if(P_marker_offd[i1] < jj_begin_row_offd) { tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither. 
*/ for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; if(big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if(P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; jj_counter++; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if(P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; } } } } } } } P_diag_array[my_thread_num] = jj_counter; P_offd_array[my_thread_num] = jj_counter_offd; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d determine structure %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ if (debug_flag== 4) wall_time = time_getWallclockSeconds(); for (i=0; i < max_num_threads; i++) { P_diag_array[i+1] += P_diag_array[i]; P_offd_array[i+1] += P_offd_array[i]; } P_diag_size = P_diag_array[max_num_threads]; P_offd_size = P_offd_array[max_num_threads]; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); } P_diag_i[n_coarse_old] = P_diag_size; P_offd_i[n_coarse_old] = P_offd_size; /* Fine to coarse mapping */ if(num_procs > 1) { hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, my_first_cpt, fine_to_coarse_offd); } } for (i = 0; i < n_fine; i++) P_marker[i] = -1; for (i = 0; i < full_off_procNodes; i++) P_marker_offd[i] = -1; #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif jj_counter = start_indexing; 
jj_counter_offd = start_indexing; if (my_thread_num) { jj_counter = P_diag_array[my_thread_num-1]; jj_counter_offd = P_offd_array[my_thread_num-1]; } /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ for (ii = ns; ii < ne; ii++) { jj_begin_row = jj_counter; jj_begin_row_offd = jj_counter_offd; P_diag_i[ii] = jj_counter; P_offd_i[ii] = jj_counter_offd; i = old_coarse_to_fine[ii]; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] > 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else if (CF_marker[i] == -2) { strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
*--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } } else if (CF_marker[i1] != -3) { P_marker[i1] = strong_f_marker; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if(P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[k1]; P_diag_data[jj_counter] = zero; jj_counter++; } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { k1 = S_offd_j[kk]; if(CF_marker_offd[k1] >= 0) { if(P_marker_offd[k1] < jj_begin_row_offd) { P_marker_offd[k1] = jj_counter_offd; P_offd_j[jj_counter_offd] = k1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] >= 0) { if(P_marker_offd[i1] < jj_begin_row_offd) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { P_marker_offd[i1] = strong_f_marker; for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; /* Find local col number */ if(big_k1 >= col_1 && big_k1 < col_n) { loc_col = (HYPRE_Int)(big_k1-col_1); if(P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[loc_col]; P_diag_data[jj_counter] = zero; jj_counter++; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if(P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; P_offd_j[jj_counter_offd]=loc_col; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } jj_end_row = jj_counter; jj_end_row_offd = jj_counter_offd; diagonal = A_diag_data[A_diag_i[i]]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { /* i1 is a c-point and strongly 
influences i, accumulate * a_(i,i1) into interpolation weight */ i1 = A_diag_j[jj]; if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += A_diag_data[jj]; } else if(P_marker[i1] == strong_f_marker) { sum = zero; sgn = 1; if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1; /* Loop over row of A for point i1 and calculate the sum * of the connections to c-points that strongly incluence i. */ for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if((P_marker[i2] >= jj_begin_row || i2 == i) && (sgn*A_diag_data[jj1]) < 0) sum += A_diag_data[jj1]; } if(num_procs > 1) { for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if(P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) sum += A_offd_data[jj1]; } } if(sum != 0) { distribute = A_diag_data[jj]/sum; /* Loop over row of A for point i1 and do the distribution */ for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) P_diag_data[P_marker[i2]] += distribute*A_diag_data[jj1]; if(i2 == i && (sgn*A_diag_data[jj1]) < 0) diagonal += distribute*A_diag_data[jj1]; } if(num_procs > 1) { for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if(P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) P_offd_data[P_marker_offd[i2]] += distribute*A_offd_data[jj1]; } } } else { diagonal += A_diag_data[jj]; } } /* neighbor i1 weakly influences i, accumulate a_(i,i1) into * diagonal */ else if (CF_marker[i1] != -3) { if(num_functions == 1 || dof_func[i] == dof_func[i1]) diagonal += A_diag_data[jj]; } } if(num_procs > 1) { for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if(P_marker_offd[i1] >= jj_begin_row_offd) P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; else if(P_marker_offd[i1] == strong_f_marker) { sum = zero; for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if(big_k1 >= col_1 && big_k1 < col_n) { /* 
diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); if(P_marker[loc_col] >= jj_begin_row || loc_col == i) sum += A_ext_data[jj1]; } else { loc_col = -(HYPRE_Int)big_k1 - 1; if(P_marker_offd[loc_col] >= jj_begin_row_offd) sum += A_ext_data[jj1]; } } if(sum != 0) { distribute = A_offd_data[jj] / sum; for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if(big_k1 >= col_1 && big_k1 < col_n) { /* diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); if(P_marker[loc_col] >= jj_begin_row) P_diag_data[P_marker[loc_col]] += distribute* A_ext_data[jj1]; if(loc_col == i) diagonal += distribute*A_ext_data[jj1]; } else { loc_col = -(HYPRE_Int)big_k1 - 1; if(P_marker_offd[loc_col] >= jj_begin_row_offd) P_offd_data[P_marker_offd[loc_col]] += distribute* A_ext_data[jj1]; } } } else { diagonal += A_offd_data[jj]; } } else if (CF_marker_offd[i1] != -3) { if(num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } } if (diagonal) { for(jj = jj_begin_row; jj < jj_end_row; jj++) P_diag_data[jj] /= -diagonal; for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) P_offd_data[jj] /= -diagonal; } } strong_f_marker--; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } /* end parallel region */ if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d fill structure %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ P = hypre_ParCSRMatrixCreate(comm, total_old_global_cpts, total_global_cpts, num_old_cpts_global, num_cpts_global, 0, P_diag_i[n_coarse_old], P_offd_i[n_coarse_old]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; hypre_CSRMatrixMemoryLocation(P_diag) = HYPRE_MEMORY_HOST; hypre_CSRMatrixMemoryLocation(P_offd) = HYPRE_MEMORY_HOST; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_coarse_old]; P_offd_size = P_offd_i[n_coarse_old]; } /* This builds col_map, col_map should be monotone increasing and contain * global numbers. 
*/ if(P_offd_size) { hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd); } hypre_MatvecCommPkgCreate(P); for (i=0; i < n_fine; i++) if (CF_marker[i] < -1) CF_marker[i] = -1; *P_ptr = P; /* Deallocate memory */ hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(old_coarse_to_fine, HYPRE_MEMORY_HOST); hypre_TFree(P_diag_array, HYPRE_MEMORY_HOST); hypre_TFree(P_offd_array, HYPRE_MEMORY_HOST); if (num_procs > 1) { hypre_CSRMatrixDestroy(Sop); hypre_CSRMatrixDestroy(A_ext); hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST); if(num_functions > 1) hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_MatvecCommPkgDestroy(extend_comm_pkg); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PARTIAL_INTERP] += hypre_MPI_Wtime(); #endif return hypre_error_flag; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildPartialStdInterp * Comment: The interpolatory weighting can be changed with the sep_weight * variable. This can enable not separating negative and positive * off diagonals in the weight formula. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildPartialStdInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_BigInt *num_old_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int sep_weight, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows; HYPRE_BigInt total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = NULL; HYPRE_Int 
*P_offd_i, *P_offd_j = NULL; /*HYPRE_Int *col_map_offd_P = NULL;*/ HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; /* Full row information for columns of A that are off diag*/ hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data; HYPRE_Int *A_ext_i; HYPRE_BigInt *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_Int *old_coarse_to_fine = NULL; HYPRE_Int loc_col; HYPRE_Int full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_BigInt *Sop_j; /* Variables to keep count of interpolatory points */ HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; HYPRE_Int jj_begin_row_offd = 0; HYPRE_Int jj_end_row_offd = 0; HYPRE_Int coarse_counter; HYPRE_Int n_coarse_old; HYPRE_BigInt total_old_global_cpts; HYPRE_Int *ihat = NULL; HYPRE_Int *ihat_offd = NULL; HYPRE_Int *ipnt = NULL; HYPRE_Int *ipnt_offd = NULL; HYPRE_Int strong_f_marker = -2; /* Interpolation weight variables */ HYPRE_Real *ahat = NULL; HYPRE_Real *ahat_offd = NULL; HYPRE_Real sum_pos, sum_pos_C, sum_neg, sum_neg_C, sum, sum_C; HYPRE_Real diagonal, distribute; HYPRE_Real alfa, beta; /* Loop variables */ /*HYPRE_Int index;*/ HYPRE_Int cnt, old_cnt; HYPRE_Int start_indexing = 0; HYPRE_Int i, ii, i1, j1, jj, kk, k1; HYPRE_BigInt big_k1; HYPRE_Int cnt_c, cnt_f, cnt_c_offd, cnt_f_offd, indx; /* Definitions */ HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; HYPRE_Real wall_time; HYPRE_Real wall_1 = 0; HYPRE_Real wall_2 = 0; HYPRE_Real wall_3 = 0; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; if (debug_flag== 4) wall_time = time_getWallclockSeconds(); /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); my_first_cpt = num_cpts_global[0]; /*my_first_old_cpt = num_old_cpts_global[0];*/ n_coarse_old = (HYPRE_Int)(num_old_cpts_global[1] - num_old_cpts_global[0]); 
/*n_coarse = num_cpts_global[1] - num_cpts_global[0];*/ if (my_id == (num_procs -1)) { total_global_cpts = num_cpts_global[1]; total_old_global_cpts = num_old_cpts_global[1]; } hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); hypre_MPI_Bcast(&total_old_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off processor information (specifically for neighbors of * neighbors */ full_off_procNodes = 0; if (num_procs > 1) { if (hypre_exchange_interp_data( &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg, A, CF_marker, S, num_functions, dof_func, 0)) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime(); #endif return hypre_error_flag; } A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixBigJ(Sop); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
*-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST); if (n_fine) { old_coarse_to_fine = hypre_CTAlloc(HYPRE_Int, n_coarse_old, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } if (full_off_procNodes) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); } hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd); jj_counter = start_indexing; jj_counter_offd = start_indexing; coarse_counter = 0; cnt = 0; old_cnt = 0; for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; if (CF_marker[i] == 1) { fine_to_coarse[i] = cnt++; old_coarse_to_fine[old_cnt++] = i; } else if (CF_marker[i] == -2) { old_coarse_to_fine[old_cnt++] = i; } } /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ for (ii = 0; ii < n_coarse_old; ii++) { P_diag_i[ii] = jj_counter; if (num_procs > 1) P_offd_i[ii] = jj_counter_offd; i = old_coarse_to_fine[ii]; if (CF_marker[i] > 0) { jj_counter++; coarse_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that stronly influence F-points * that strongly influence i. 
*--------------------------------------------------------------------*/ else if (CF_marker[i] == -2) { for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] > 0) { /* i1 is a C point */ if (P_marker[i1] < P_diag_i[ii]) { P_marker[i1] = jj_counter; jj_counter++; } } else if (CF_marker[i1] != -3) { /* i1 is a F point, loop through it's strong neighbors */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] > 0) { if(P_marker[k1] < P_diag_i[ii]) { P_marker[k1] = jj_counter; jj_counter++; } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { k1 = S_offd_j[kk]; if (CF_marker_offd[k1] > 0) { if(P_marker_offd[k1] < P_offd_i[ii]) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; jj_counter_offd++; } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (CF_marker_offd[i1] > 0) { if(P_marker_offd[i1] < P_offd_i[ii]) { tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither. 
*/ for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; if(big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if(CF_marker[loc_col] >= 0) { if(P_marker[loc_col] < P_diag_i[ii]) { P_marker[loc_col] = jj_counter; jj_counter++; } } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if(CF_marker_offd[loc_col] >= 0) { if(P_marker_offd[loc_col] < P_offd_i[ii]) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; } } } } } } } } } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d determine structure %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ P_diag_size = jj_counter; P_offd_size = jj_counter_offd; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); } P_diag_i[n_coarse_old] = jj_counter; P_offd_i[n_coarse_old] = jj_counter_offd; jj_counter = start_indexing; jj_counter_offd = start_indexing; /* Fine to coarse mapping */ if(num_procs > 1) { hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, my_first_cpt, fine_to_coarse_offd); } /* Initialize ahat, which is a modification to a, used in the standard * interpolation routine. 
*/ if (n_fine) { ahat = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST); ihat = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); ipnt = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } if (full_off_procNodes) { ahat_offd = hypre_CTAlloc(HYPRE_Real, full_off_procNodes, HYPRE_MEMORY_HOST); ihat_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); ipnt_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); } for (i = 0; i < n_fine; i++) { P_marker[i] = -1; ahat[i] = 0; ihat[i] = -1; } for (i = 0; i < full_off_procNodes; i++) { P_marker_offd[i] = -1; ahat_offd[i] = 0; ihat_offd[i] = -1; } /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ for (ii = 0; ii < n_coarse_old; ii++) { jj_begin_row = jj_counter; jj_begin_row_offd = jj_counter_offd; i = old_coarse_to_fine[ii]; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] > 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else if (CF_marker[i] == -2) { if (debug_flag==4) wall_time = time_getWallclockSeconds(); strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
*--------------------------------------------------------------*/ if (CF_marker[i1] > 0) { if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = i1; P_diag_data[jj_counter] = zero; jj_counter++; } } else if (CF_marker[i1] != -3) { P_marker[i1] = strong_f_marker; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] > 0) { if(P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; P_diag_j[jj_counter] = k1; P_diag_data[jj_counter] = zero; jj_counter++; } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { k1 = S_offd_j[kk]; if(CF_marker_offd[k1] > 0) { if(P_marker_offd[k1] < jj_begin_row_offd) { P_marker_offd[k1] = jj_counter_offd; P_offd_j[jj_counter_offd] = k1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] > 0) { if(P_marker_offd[i1] < jj_begin_row_offd) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd]=i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { P_marker_offd[i1] = strong_f_marker; for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; if(big_k1 >= col_1 && big_k1 < col_n) { loc_col = (HYPRE_Int)(big_k1-col_1); if(CF_marker[loc_col] > 0) { if(P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; P_diag_j[jj_counter] = loc_col; P_diag_data[jj_counter] = zero; jj_counter++; } } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if(CF_marker_offd[loc_col] > 0) { if(P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; P_offd_j[jj_counter_offd]=loc_col; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } } jj_end_row = jj_counter; jj_end_row_offd = jj_counter_offd; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; wall_1 += wall_time; fflush(NULL); } if (debug_flag==4) wall_time = 
time_getWallclockSeconds(); cnt_c = 0; cnt_f = jj_end_row-jj_begin_row; cnt_c_offd = 0; cnt_f_offd = jj_end_row_offd-jj_begin_row_offd; ihat[i] = cnt_f; ipnt[cnt_f] = i; ahat[cnt_f++] = A_diag_data[A_diag_i[i]]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { /* i1 is direct neighbor */ i1 = A_diag_j[jj]; if (P_marker[i1] != strong_f_marker) { indx = ihat[i1]; if (indx > -1) ahat[indx] += A_diag_data[jj]; else if (P_marker[i1] >= jj_begin_row) { ihat[i1] = cnt_c; ipnt[cnt_c] = i1; ahat[cnt_c++] += A_diag_data[jj]; } else if (CF_marker[i1] != -3) { ihat[i1] = cnt_f; ipnt[cnt_f] = i1; ahat[cnt_f++] += A_diag_data[jj]; } } else { if(num_functions == 1 || dof_func[i] == dof_func[i1]) { distribute = A_diag_data[jj]/A_diag_data[A_diag_i[i1]]; for (kk = A_diag_i[i1]+1; kk < A_diag_i[i1+1]; kk++) { k1 = A_diag_j[kk]; indx = ihat[k1]; if (indx > -1) ahat[indx] -= A_diag_data[kk]*distribute; else if (P_marker[k1] >= jj_begin_row) { ihat[k1] = cnt_c; ipnt[cnt_c] = k1; ahat[cnt_c++] -= A_diag_data[kk]*distribute; } else { ihat[k1] = cnt_f; ipnt[cnt_f] = k1; ahat[cnt_f++] -= A_diag_data[kk]*distribute; } } if(num_procs > 1) { for (kk = A_offd_i[i1]; kk < A_offd_i[i1+1]; kk++) { k1 = A_offd_j[kk]; indx = ihat_offd[k1]; if(num_functions == 1 || dof_func[i1] == dof_func_offd[k1]) { if (indx > -1) ahat_offd[indx] -= A_offd_data[kk]*distribute; else if (P_marker_offd[k1] >= jj_begin_row_offd) { ihat_offd[k1] = cnt_c_offd; ipnt_offd[cnt_c_offd] = k1; ahat_offd[cnt_c_offd++] -= A_offd_data[kk]*distribute; } else { ihat_offd[k1] = cnt_f_offd; ipnt_offd[cnt_f_offd] = k1; ahat_offd[cnt_f_offd++] -= A_offd_data[kk]*distribute; } } } } } } } if(num_procs > 1) { for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if(P_marker_offd[i1] != strong_f_marker) { indx = ihat_offd[i1]; if (indx > -1) ahat_offd[indx] += A_offd_data[jj]; else if (P_marker_offd[i1] >= jj_begin_row_offd) { ihat_offd[i1] = cnt_c_offd; ipnt_offd[cnt_c_offd] = i1; ahat_offd[cnt_c_offd++] += 
A_offd_data[jj]; } else if (CF_marker_offd[i1] != -3) { ihat_offd[i1] = cnt_f_offd; ipnt_offd[cnt_f_offd] = i1; ahat_offd[cnt_f_offd++] += A_offd_data[jj]; } } else { if(num_functions == 1 || dof_func[i] == dof_func_offd[i1]) { distribute = A_offd_data[jj]/A_ext_data[A_ext_i[i1]]; for (kk = A_ext_i[i1]+1; kk < A_ext_i[i1+1]; kk++) { big_k1 = A_ext_j[kk]; if(big_k1 >= col_1 && big_k1 < col_n) { /*diag*/ loc_col = (HYPRE_Int)(big_k1 - col_1); indx = ihat[loc_col]; if (indx > -1) ahat[indx] -= A_ext_data[kk]*distribute; else if (P_marker[loc_col] >= jj_begin_row) { ihat[loc_col] = cnt_c; ipnt[cnt_c] = loc_col; ahat[cnt_c++] -= A_ext_data[kk]*distribute; } else { ihat[loc_col] = cnt_f; ipnt[cnt_f] = loc_col; ahat[cnt_f++] -= A_ext_data[kk]*distribute; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if(num_functions == 1 || dof_func_offd[loc_col] == dof_func_offd[i1]) { indx = ihat_offd[loc_col]; if (indx > -1) ahat_offd[indx] -= A_ext_data[kk]*distribute; else if(P_marker_offd[loc_col] >= jj_begin_row_offd) { ihat_offd[loc_col] = cnt_c_offd; ipnt_offd[cnt_c_offd] = loc_col; ahat_offd[cnt_c_offd++] -= A_ext_data[kk]*distribute; } else { ihat_offd[loc_col] = cnt_f_offd; ipnt_offd[cnt_f_offd] = loc_col; ahat_offd[cnt_f_offd++] -= A_ext_data[kk]*distribute; } } } } } } } } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; wall_2 += wall_time; fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds(); diagonal = ahat[cnt_c]; ahat[cnt_c] = 0; sum_pos = 0; sum_pos_C = 0; sum_neg = 0; sum_neg_C = 0; sum = 0; sum_C = 0; if(sep_weight == 1) { for (jj=0; jj < cnt_c; jj++) { if (ahat[jj] > 0) { sum_pos_C += ahat[jj]; } else { sum_neg_C += ahat[jj]; } } if(num_procs > 1) { for (jj=0; jj < cnt_c_offd; jj++) { if (ahat_offd[jj] > 0) { sum_pos_C += ahat_offd[jj]; } else { sum_neg_C += ahat_offd[jj]; } } } sum_pos = sum_pos_C; sum_neg = sum_neg_C; for (jj=cnt_c+1; jj < cnt_f; jj++) { if (ahat[jj] > 0) { sum_pos += ahat[jj]; } else { sum_neg += 
ahat[jj]; } ahat[jj] = 0; } if(num_procs > 1) { for (jj=cnt_c_offd; jj < cnt_f_offd; jj++) { if (ahat_offd[jj] > 0) { sum_pos += ahat_offd[jj]; } else { sum_neg += ahat_offd[jj]; } ahat_offd[jj] = 0; } } if (sum_neg_C*diagonal) alfa = sum_neg/sum_neg_C/diagonal; if (sum_pos_C*diagonal) beta = sum_pos/sum_pos_C/diagonal; /*----------------------------------------------------------------- * Set interpolation weight by dividing by the diagonal. *-----------------------------------------------------------------*/ for (jj = jj_begin_row; jj < jj_end_row; jj++) { j1 = ihat[P_diag_j[jj]]; if (ahat[j1] > 0) P_diag_data[jj] = -beta*ahat[j1]; else P_diag_data[jj] = -alfa*ahat[j1]; P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]]; ahat[j1] = 0; } for (jj=0; jj < cnt_f; jj++) ihat[ipnt[jj]] = -1; if(num_procs > 1) { for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { j1 = ihat_offd[P_offd_j[jj]]; if (ahat_offd[j1] > 0) P_offd_data[jj] = -beta*ahat_offd[j1]; else P_offd_data[jj] = -alfa*ahat_offd[j1]; ahat_offd[j1] = 0; } for (jj=0; jj < cnt_f_offd; jj++) ihat_offd[ipnt_offd[jj]] = -1; } } else { for (jj=0; jj < cnt_c; jj++) { sum_C += ahat[jj]; } if(num_procs > 1) { for (jj=0; jj < cnt_c_offd; jj++) { sum_C += ahat_offd[jj]; } } sum = sum_C; for (jj=cnt_c+1; jj < cnt_f; jj++) { sum += ahat[jj]; ahat[jj] = 0; } if(num_procs > 1) { for (jj=cnt_c_offd; jj < cnt_f_offd; jj++) { sum += ahat_offd[jj]; ahat_offd[jj] = 0; } } if (sum_C*diagonal) alfa = sum/sum_C/diagonal; /*----------------------------------------------------------------- * Set interpolation weight by dividing by the diagonal. 
*-----------------------------------------------------------------*/ for (jj = jj_begin_row; jj < jj_end_row; jj++) { j1 = ihat[P_diag_j[jj]]; P_diag_data[jj] = -alfa*ahat[j1]; P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]]; ahat[j1] = 0; } for (jj=0; jj < cnt_f; jj++) ihat[ipnt[jj]] = -1; if(num_procs > 1) { for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { j1 = ihat_offd[P_offd_j[jj]]; P_offd_data[jj] = -alfa*ahat_offd[j1]; ahat_offd[j1] = 0; } for (jj=0; jj < cnt_f_offd; jj++) ihat_offd[ipnt_offd[jj]] = -1; } } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; wall_3 += wall_time; fflush(NULL); } } } if (debug_flag==4) { hypre_printf("Proc = %d fill part 1 %f part 2 %f part 3 %f\n", my_id, wall_1, wall_2, wall_3); fflush(NULL); } P = hypre_ParCSRMatrixCreate(comm, total_old_global_cpts, total_global_cpts, num_old_cpts_global, num_cpts_global, 0, P_diag_i[n_coarse_old], P_offd_i[n_coarse_old]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; hypre_CSRMatrixMemoryLocation(P_diag) = HYPRE_MEMORY_HOST; hypre_CSRMatrixMemoryLocation(P_offd) = HYPRE_MEMORY_HOST; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_coarse_old]; P_offd_size = P_offd_i[n_coarse_old]; } /* This builds col_map, col_map should be monotone increasing and contain * global numbers. 
*/ if(P_offd_size) { hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd); } hypre_MatvecCommPkgCreate(P); for (i=0; i < n_fine; i++) if (CF_marker[i] < -1) CF_marker[i] = -1; *P_ptr = P; /* Deallocate memory */ hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(old_coarse_to_fine, HYPRE_MEMORY_HOST); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(ahat, HYPRE_MEMORY_HOST); hypre_TFree(ihat, HYPRE_MEMORY_HOST); hypre_TFree(ipnt, HYPRE_MEMORY_HOST); if (full_off_procNodes) { hypre_TFree(ahat_offd, HYPRE_MEMORY_HOST); hypre_TFree(ihat_offd, HYPRE_MEMORY_HOST); hypre_TFree(ipnt_offd, HYPRE_MEMORY_HOST); } if (num_procs > 1) { hypre_CSRMatrixDestroy(Sop); hypre_CSRMatrixDestroy(A_ext); hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST); if(num_functions > 1) hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_MatvecCommPkgDestroy(extend_comm_pkg); } return hypre_error_flag; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildPartialExtInterp * Comment: *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildPartialExtInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_BigInt *num_old_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = 
hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows; HYPRE_BigInt total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /*HYPRE_Int *col_map_offd_P = NULL;*/ HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; /* Full row information for columns of A that are off diag*/ hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data; HYPRE_Int *A_ext_i; HYPRE_BigInt *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_Int *old_coarse_to_fine = NULL; HYPRE_Int loc_col; HYPRE_Int full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_BigInt *Sop_j; HYPRE_Int sgn; /* Variables to keep count of interpolatory points */ HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; HYPRE_Int 
jj_begin_row_offd = 0; HYPRE_Int jj_end_row_offd = 0; HYPRE_Int coarse_counter; HYPRE_Int n_coarse_old; HYPRE_BigInt total_old_global_cpts; /* Interpolation weight variables */ HYPRE_Real sum, diagonal, distribute; HYPRE_Int strong_f_marker = -2; /* Loop variables */ /*HYPRE_Int index;*/ HYPRE_Int cnt, old_cnt; HYPRE_Int start_indexing = 0; HYPRE_Int i, ii, i1, i2, jj, kk, k1, jj1; HYPRE_BigInt big_k1; /* Definitions */ HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; HYPRE_Real wall_time; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; if (debug_flag==4) wall_time = time_getWallclockSeconds(); /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); my_first_cpt = num_cpts_global[0]; /*my_first_old_cpt = num_old_cpts_global[0];*/ n_coarse_old = (HYPRE_Int)(num_old_cpts_global[1] - num_old_cpts_global[0]); /*n_coarse = num_cpts_global[1] - num_cpts_global[0];*/ if (my_id == (num_procs -1)) { total_global_cpts = num_cpts_global[1]; total_old_global_cpts = num_old_cpts_global[1]; } hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); hypre_MPI_Bcast(&total_old_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off processor information (specifically for neighbors of * neighbors */ full_off_procNodes = 0; if (num_procs > 1) { if (hypre_exchange_interp_data( &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg, A, CF_marker, S, num_functions, dof_func, 1)) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime(); #endif return hypre_error_flag; } A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixBigJ(Sop); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in 
fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST); if (n_fine) { old_coarse_to_fine = hypre_CTAlloc(HYPRE_Int, n_coarse_old, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } if (full_off_procNodes) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); } hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd); jj_counter = start_indexing; jj_counter_offd = start_indexing; coarse_counter = 0; cnt = 0; old_cnt = 0; for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; if (CF_marker[i] == 1) { fine_to_coarse[i] = cnt++; old_coarse_to_fine[old_cnt++] = i; } else if (CF_marker[i] == -2) { old_coarse_to_fine[old_cnt++] = i; } } /*----------------------------------------------------------------------- * Loop over fine grid. 
*-----------------------------------------------------------------------*/ for (ii = 0; ii < n_coarse_old; ii++) { P_diag_i[ii] = jj_counter; if (num_procs > 1) P_offd_i[ii] = jj_counter_offd; i = old_coarse_to_fine[ii]; if (CF_marker[i] > 0) { jj_counter++; coarse_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that stronly influence F-points * that strongly influence i. *--------------------------------------------------------------------*/ else if (CF_marker[i] == -2) { for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] > 0) { /* i1 is a C point */ if (P_marker[i1] < P_diag_i[ii]) { P_marker[i1] = jj_counter; jj_counter++; } } else if (CF_marker[i1] != -3) { /* i1 is a F point, loop through it's strong neighbors */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] > 0) { if(P_marker[k1] < P_diag_i[ii]) { P_marker[k1] = jj_counter; jj_counter++; } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { k1 = S_offd_j[kk]; if (CF_marker_offd[k1] > 0) { if(P_marker_offd[k1] < P_offd_i[ii]) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; jj_counter_offd++; } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (CF_marker_offd[i1] > 0) { if(P_marker_offd[i1] < P_offd_i[ii]) { tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither. 
*/ for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; if(big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if(P_marker[loc_col] < P_diag_i[ii]) { P_marker[loc_col] = jj_counter; jj_counter++; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if(P_marker_offd[loc_col] < P_offd_i[ii]) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; } } } } } } } } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d determine structure %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ if (debug_flag== 4) wall_time = time_getWallclockSeconds(); P_diag_size = jj_counter; P_offd_size = jj_counter_offd; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); } P_diag_i[n_coarse_old] = jj_counter; P_offd_i[n_coarse_old] = jj_counter_offd; jj_counter = start_indexing; jj_counter_offd = start_indexing; /* Fine to coarse mapping */ if(num_procs > 1) { hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, my_first_cpt, fine_to_coarse_offd); } for (i = 0; i < n_fine; i++) P_marker[i] = -1; for (i = 0; i < full_off_procNodes; i++) P_marker_offd[i] = -1; /*----------------------------------------------------------------------- * Loop over fine grid points. 
*-----------------------------------------------------------------------*/ for (ii = 0; ii < n_coarse_old; ii++) { jj_begin_row = jj_counter; jj_begin_row_offd = jj_counter_offd; i = old_coarse_to_fine[ii]; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] > 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else if (CF_marker[i] == -2) { strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } } else if (CF_marker[i1] != -3) { P_marker[i1] = strong_f_marker; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if(P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[k1]; P_diag_data[jj_counter] = zero; jj_counter++; } } } if(num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { k1 = S_offd_j[kk]; if(CF_marker_offd[k1] >= 0) { if(P_marker_offd[k1] < jj_begin_row_offd) { P_marker_offd[k1] = jj_counter_offd; P_offd_j[jj_counter_offd] = k1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] >= 0) { 
if(P_marker_offd[i1] < jj_begin_row_offd) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { P_marker_offd[i1] = strong_f_marker; for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; /* Find local col number */ if(big_k1 >= col_1 && big_k1 < col_n) { loc_col = (HYPRE_Int)(big_k1-col_1); if(P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[loc_col]; P_diag_data[jj_counter] = zero; jj_counter++; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if(P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; P_offd_j[jj_counter_offd]=loc_col; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } jj_end_row = jj_counter; jj_end_row_offd = jj_counter_offd; diagonal = A_diag_data[A_diag_i[i]]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { /* i1 is a c-point and strongly influences i, accumulate * a_(i,i1) into interpolation weight */ i1 = A_diag_j[jj]; if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += A_diag_data[jj]; } else if(P_marker[i1] == strong_f_marker) { sum = zero; sgn = 1; if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1; /* Loop over row of A for point i1 and calculate the sum * of the connections to c-points that strongly incluence i. 
*/ for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if((P_marker[i2] >= jj_begin_row) && (sgn*A_diag_data[jj1]) < 0) sum += A_diag_data[jj1]; } if(num_procs > 1) { for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if(P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) sum += A_offd_data[jj1]; } } if(sum != 0) { distribute = A_diag_data[jj]/sum; /* Loop over row of A for point i1 and do the distribution */ for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) P_diag_data[P_marker[i2]] += distribute*A_diag_data[jj1]; } if(num_procs > 1) { for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if(P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) P_offd_data[P_marker_offd[i2]] += distribute*A_offd_data[jj1]; } } } else { diagonal += A_diag_data[jj]; } } /* neighbor i1 weakly influences i, accumulate a_(i,i1) into * diagonal */ else if (CF_marker[i1] != -3) { if(num_functions == 1 || dof_func[i] == dof_func[i1]) diagonal += A_diag_data[jj]; } } if(num_procs > 1) { for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if(P_marker_offd[i1] >= jj_begin_row_offd) P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; else if(P_marker_offd[i1] == strong_f_marker) { sum = zero; for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if(big_k1 >= col_1 && big_k1 < col_n) { /* diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); if(P_marker[loc_col] >= jj_begin_row ) sum += A_ext_data[jj1]; } else { loc_col = -(HYPRE_Int)big_k1 - 1; if(P_marker_offd[loc_col] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0) sum += A_ext_data[jj1]; } } if(sum != 0) { distribute = A_offd_data[jj] / sum; for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if(big_k1 >= col_1 && big_k1 < col_n) { /* diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); 
if(P_marker[loc_col] >= jj_begin_row) P_diag_data[P_marker[loc_col]] += distribute* A_ext_data[jj1]; } else { loc_col = -(HYPRE_Int)big_k1 - 1; if(P_marker_offd[loc_col] >= jj_begin_row_offd) P_offd_data[P_marker_offd[loc_col]] += distribute* A_ext_data[jj1]; } } } else { diagonal += A_offd_data[jj]; } } else if (CF_marker_offd[i1] != -3) { if(num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } } if (diagonal) { for(jj = jj_begin_row; jj < jj_end_row; jj++) P_diag_data[jj] /= -diagonal; for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) P_offd_data[jj] /= -diagonal; } } strong_f_marker--; } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d fill structure %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ P = hypre_ParCSRMatrixCreate(comm, total_old_global_cpts, total_global_cpts, num_old_cpts_global, num_cpts_global, 0, P_diag_i[n_coarse_old], P_offd_i[n_coarse_old]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; hypre_CSRMatrixMemoryLocation(P_diag) = HYPRE_MEMORY_HOST; hypre_CSRMatrixMemoryLocation(P_offd) = HYPRE_MEMORY_HOST; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = 
hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_coarse_old]; P_offd_size = P_offd_i[n_coarse_old]; } /* This builds col_map, col_map should be monotone increasing and contain * global numbers. */ if(P_offd_size) { hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd); } hypre_MatvecCommPkgCreate(P); for (i=0; i < n_fine; i++) if (CF_marker[i] < -1) CF_marker[i] = -1; *P_ptr = P; /* Deallocate memory */ hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(old_coarse_to_fine, HYPRE_MEMORY_HOST); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); if (num_procs > 1) { hypre_CSRMatrixDestroy(Sop); hypre_CSRMatrixDestroy(A_ext); hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST); if(num_functions > 1) hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_MatvecCommPkgDestroy(extend_comm_pkg); } return hypre_error_flag; }
/* ===== file boundary: GB_unaryop__lnot_int16_int16.c ===== */
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_int16_int16
// op(A') function:  GB_tran__lnot_int16_int16

// C type:   int16_t
// A type:   int16_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = !(aij != 0)

// type of the A matrix entries
#define GB_ATYPE \
    int16_t

// type of the C matrix entries
#define GB_CTYPE \
    int16_t

// aij = Ax [pA]: fetch one entry of A
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

// access the pC-th entry of the C value array
#define GB_CX(p) Cx [p]

// unary operator: logical NOT of the (already cast) value; the result is
// 0 or 1, stored as int16_t
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting from the A type to the C type (here both are int16_t)
#define GB_CASTING(z, x) \
    int16_t z = (int16_t) x ;

// cij = op (cast (aij)): fetch, cast, apply the operator, and store
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */  \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply z = !(x != 0) element-wise to the anz entries of Ax, writing the
// results into Cx.  The loop over the entries [0, anz) is parallelized with
// OpenMP over nthreads threads.  Returns GrB_NO_VALUE when this hard-coded
// kernel is compiled out (GB_DISABLE), so the caller falls back to the
// generic case.
GrB_Info GB_unop__lnot_int16_int16
(
    int16_t *restrict Cx,           // output value array
    const int16_t *restrict Ax,     // input value array
    int64_t anz,                    // number of entries processed
    int nthreads                    // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose-and-apply loops live in the included template
// GB_unaryop_transpose.c, which expands using the GB_* macros defined above
// (phase 2 of 2).  Returns GrB_NO_VALUE when the kernel is compiled out.
GrB_Info GB_tran__lnot_int16_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ===== file boundary: no_option_no_warn.c ===== */
// RUN: %clang_cc1 -verify -Wno-source-uses-openmp -o - %s // expected-no-diagnostics int a; #pragma omp threadprivate(a,b) #pragma omp parallel
/* ===== file boundary: GB_binop__lor_bool.c ===== */
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lor_bool) // A.*B function (eWiseMult): GB (_AemultB_08__lor_bool) // A.*B function (eWiseMult): GB (_AemultB_02__lor_bool) // A.*B function (eWiseMult): GB (_AemultB_04__lor_bool) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_bool) // A*D function (colscale): GB (_AxD__lor_bool) // D*A function (rowscale): GB (_DxB__lor_bool) // C+=B function (dense accum): GB (_Cdense_accumB__lor_bool) // C+=b function (dense accum): GB (_Cdense_accumb__lor_bool) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_bool) // C=scalar+B GB (_bind1st__lor_bool) // C=scalar+B' GB (_bind1st_tran__lor_bool) // C=A+scalar GB (_bind2nd__lor_bool) // C=A'+scalar GB (_bind2nd_tran__lor_bool) // C type: bool // A type: bool // A pattern? 0 // B type: bool // B pattern? 
0 // BinaryOp: cij = (aij || bij) #define GB_ATYPE \ bool #define GB_BTYPE \ bool #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ bool aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ bool bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x || y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOR || GxB_NO_BOOL || GxB_NO_LOR_BOOL) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__lor_bool) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lor_bool) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lor_bool) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type bool bool bwork = (*((bool *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lor_bool) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) 
C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lor_bool) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lor_bool) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; bool alpha_scalar ; bool beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((bool *) alpha_scalar_in)) ; beta_scalar = (*((bool *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__lor_bool) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool 
Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lor_bool) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__lor_bool) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lor_bool) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lor_bool) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; bool x = (*((bool *) x_input)) ; bool *Bx = (bool *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; 
p++) { if (!GBB (Bb, p)) continue ; bool bij = GBX (Bx, p, false) ; Cx [p] = (x || bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lor_bool) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; bool *Ax = (bool *) Ax_input ; bool y = (*((bool *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; bool aij = GBX (Ax, p, false) ; Cx [p] = (aij || y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x || aij) ; \ } GrB_Info GB (_bind1st_tran__lor_bool) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ bool #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool x = (*((const bool *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ bool } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij || y) ; \ } GrB_Info GB (_bind2nd_tran__lor_bool) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool y = (*((const bool *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
libsvm_parser.h
/*! * Copyright (c) 2015 by Contributors * \file libsvm_parser.h * \brief iterator parser to parse libsvm format * \author Tianqi Chen */ #ifndef XGBOOST_IO_LIBSVM_PARSER_H_ #define XGBOOST_IO_LIBSVM_PARSER_H_ #define NOMINMAX #include <vector> #include <cstring> #include <cctype> #include <algorithm> #include "../utils/omp.h" #include "../utils/utils.h" #include "../sync/sync.h" #include "../utils/thread_buffer.h" #include "./sparse_batch_page.h" namespace xgboost { namespace io { /*! \brief page returned by libsvm parser */ struct LibSVMPage : public SparsePage { std::vector<float> label; // overload clear inline void Clear() { SparsePage::Clear(); label.clear(); } }; /*! * \brief libsvm parser that parses the input lines * and returns rows in input data * factory that was used by threadbuffer template */ class LibSVMPageFactory { public: LibSVMPageFactory() : bytes_read_(0), at_head_(true) { } inline bool Init(void) { return true; } inline void Setup(dmlc::InputSplit *source, int nthread) { source_ = source; int maxthread; #pragma omp parallel { maxthread = omp_get_num_procs(); } maxthread = std::max(maxthread / 2, 1); nthread_ = std::min(maxthread, nthread); } inline void SetParam(const char *name, const char *val) {} inline bool LoadNext(std::vector<LibSVMPage> *data) { return FillData(data); } inline void FreeSpace(std::vector<LibSVMPage> *a) { delete a; } inline std::vector<LibSVMPage> *Create(void) { return new std::vector<LibSVMPage>(); } inline void BeforeFirst(void) { utils::Assert(at_head_, "cannot call beforefirst"); } inline void Destroy(void) { delete source_; } inline size_t bytes_read(void) const { return bytes_read_; } protected: inline bool FillData(std::vector<LibSVMPage> *data) { dmlc::InputSplit::Blob chunk; if (!source_->NextChunk(&chunk)) return false; int nthread; #pragma omp parallel num_threads(nthread_) { nthread = omp_get_num_threads(); } // reserve space for data data->resize(nthread); bytes_read_ += chunk.size; 
utils::Assert(chunk.size != 0, "LibSVMParser.FileData"); char *head = reinterpret_cast<char*>(chunk.dptr); #pragma omp parallel num_threads(nthread_) { // threadid int tid = omp_get_thread_num(); size_t nstep = (chunk.size + nthread - 1) / nthread; size_t sbegin = std::min(tid * nstep, chunk.size); size_t send = std::min((tid + 1) * nstep, chunk.size); char *pbegin = BackFindEndLine(head + sbegin, head); char *pend; if (tid + 1 == nthread) { pend = head + send; } else { pend = BackFindEndLine(head + send, head); } ParseBlock(pbegin, pend, &(*data)[tid]); } return true; } /*! * \brief parse data into out * \param begin beginning of buffer * \param end end of buffer */ inline void ParseBlock(char *begin, char *end, LibSVMPage *out) { using namespace std; out->Clear(); char *p = begin; while (p != end) { while (isspace(*p) && p != end) ++p; if (p == end) break; char *head = p; while (isdigit(*p) && p != end) ++p; if (*p == ':') { out->data.push_back(SparseBatch::Entry(atol(head), static_cast<bst_float>(atof(p + 1)))); } else { if (out->label.size() != 0) { out->offset.push_back(out->data.size()); } out->label.push_back(static_cast<float>(atof(head))); } while (!isspace(*p) && p != end) ++p; } if (out->label.size() != 0) { out->offset.push_back(out->data.size()); } utils::Check(out->label.size() + 1 == out->offset.size(), "LibSVMParser inconsistent"); } /*! 
* \brief start from bptr, go backward and find first endof line * \param bptr end position to go backward * \param begin the beginning position of buffer * \return position of first endof line going backward */ inline char* BackFindEndLine(char *bptr, char *begin) { for (; bptr != begin; --bptr) { if (*bptr == '\n' || *bptr == '\r') return bptr; } return begin; } private: // nthread int nthread_; // number of bytes readed size_t bytes_read_; // at beginning, at end of stream bool at_head_; // source split that provides the data dmlc::InputSplit *source_; }; class LibSVMParser : public utils::IIterator<LibSVMPage> { public: explicit LibSVMParser(dmlc::InputSplit *source, int nthread) : at_end_(false), data_ptr_(0), data_(NULL) { itr.SetParam("buffer_size", "2"); itr.get_factory().Setup(source, nthread); itr.Init(); } virtual void BeforeFirst(void) { itr.BeforeFirst(); } virtual bool Next(void) { if (at_end_) return false; while (true) { if (data_ == NULL || data_ptr_ >= data_->size()) { if (!itr.Next(data_)) { at_end_ = true; return false; } else { data_ptr_ = 0; } } while (data_ptr_ < data_->size()) { data_ptr_ += 1; if ((*data_)[data_ptr_ - 1].Size() != 0) { return true; } } } return true; } virtual const LibSVMPage &Value(void) const { return (*data_)[data_ptr_ - 1]; } inline size_t bytes_read(void) const { return itr.get_factory().bytes_read(); } private: bool at_end_; size_t data_ptr_; std::vector<LibSVMPage> *data_; utils::ThreadBuffer<std::vector<LibSVMPage>*, LibSVMPageFactory> itr; }; } // namespace io } // namespace xgboost #endif // XGBOOST_IO_LIBSVM_PARSER_H_
opt_sls_solver.h
/*++
Copyright (c) 2014 Microsoft Corporation

Module Name:

    opt_sls_solver.h

Abstract:

    Wraps a solver with SLS for improving a solution using an objective function.

Author:

    Nikolaj Bjorner (nbjorner) 2014-4-18

Notes:

--*/
#ifndef _OPT_SLS_SOLVER_H_
#define _OPT_SLS_SOLVER_H_

#include "solver_na2as.h"
#include "card2bv_tactic.h"
#include "nnf_tactic.h"
#include "pb_sls.h"
#include "bvsls_opt_engine.h"

namespace opt {

    // Decorator around an exact solver: after a satisfiable check_sat, the
    // model is post-processed by a stochastic local search (SLS) engine to
    // improve the weighted sum of soft constraints.  Most solver methods
    // simply forward to the wrapped solver.
    class sls_solver : public solver_na2as {
        ast_manager&   m;
        ref<solver>    m_solver;                  // the wrapped exact solver
        scoped_ptr<bvsls_opt_engine> m_bvsls;     // bit-vector SLS engine, lazily allocated
        scoped_ptr<smt::pb_sls>      m_pbsls;     // pseudo-Boolean SLS engine, lazily allocated
        pb::card_pb_rewriter m_pb2bv;             // rewrites cardinality/PB constraints into bit-vector form
        vector<rational>     m_weights;           // weight of each soft constraint (parallel to m_soft)
        expr_ref_vector      m_soft;              // soft constraints to maximize
        model_ref            m_model;             // last model (possibly improved by SLS)
        params_ref           m_params;
        symbol               m_engine;            // "pb" selects the pb_sls engine, anything else bvsls
    public:
        sls_solver(ast_manager & m, solver* s,
                   expr_ref_vector const& soft,
                   vector<rational> const& weights,
                   params_ref & p):
            solver_na2as(m),
            m(m),
            m_solver(s),
            m_bvsls(0),
            m_pbsls(0),
            m_pb2bv(m),
            m_weights(weights),
            m_soft(soft)
        {
            updt_params(p);
        }
        virtual ~sls_solver() {}
        virtual void updt_params(params_ref & p) {
            m_solver->updt_params(p);
            // keep a copy for the lazily-created SLS engines
            m_params.copy(p);
            opt_params _p(p);
            m_engine = _p.sls_engine();
        }
        virtual void collect_param_descrs(param_descrs & r) {
            m_solver->collect_param_descrs(r);
        }
        virtual void collect_statistics(statistics & st) const {
            m_solver->collect_statistics(st);
            // the SLS engines only exist after an optimization run
            if (m_bvsls) m_bvsls->collect_statistics(st);
            if (m_pbsls) m_pbsls->collect_statistics(st);
        }
        virtual void assert_expr(expr * t) {
            m_solver->assert_expr(t);
        }
        virtual void get_unsat_core(ptr_vector<expr> & r) {
            m_solver->get_unsat_core(r);
        }
        // returns the SLS-improved model, not the wrapped solver's raw model
        // (parameter deliberately shadows the ast_manager member here)
        virtual void get_model(model_ref & m) {
            m = m_model;
        }
        virtual proof * get_proof() {
            return m_solver->get_proof();
        }
        virtual std::string reason_unknown() const {
            return m_solver->reason_unknown();
        }
        virtual void get_labels(svector<symbol> & r) {
            m_solver->get_labels(r);
        }
        virtual void set_cancel(bool f) {
            m_solver->set_cancel(f);
            m_pb2bv.set_cancel(f);
            // the named critical section also guards engine allocation in
            // pbsls_opt/bvsls_opt, so cancellation from another thread never
            // races with a concurrent alloc of m_bvsls / m_pbsls
            #pragma omp critical (sls_solver)
            {
                if (m_bvsls) {
                    m_bvsls->set_cancel(f);
                }
                if (m_pbsls) {
                    m_pbsls->set_cancel(f);
                }
            }
        }
        virtual void set_progress_callback(progress_callback * callback) {
            m_solver->set_progress_callback(callback);
        }
        virtual unsigned get_num_assertions() const {
            return m_solver->get_num_assertions();
        }
        virtual expr * get_assertion(unsigned idx) const {
            return m_solver->get_assertion(idx);
        }
        virtual void display(std::ostream & out) const {
            m_solver->display(out);
            // if (m_bvsls) m_bvsls->display(out);
        }
        // improve mdl with the engine selected by the "sls_engine" parameter
        void opt(model_ref& mdl) {
            if (m_engine == symbol("pb")) {
                pbsls_opt(mdl);
            }
            else {
                bvsls_opt(mdl);
            }
        }
        // Build a bit-vector objective: sum over soft constraints of
        // ite(soft[i], den*weights[i], 0), where den is the common
        // denominator of the weight sum; bv_size is chosen from an upper
        // bound on the attainable total so the sum cannot overflow.
        static expr_ref soft2bv(expr_ref_vector const& soft,
                                vector<rational> const& weights) {
            ast_manager& m = soft.get_manager();
            pb::card_pb_rewriter pb2bv(m);
            rational upper(1);
            expr_ref objective(m);
            for (unsigned i = 0; i < weights.size(); ++i) {
                upper += weights[i];
            }
            expr_ref zero(m), tmp(m);
            bv_util bv(m);
            expr_ref_vector es(m);
            rational num = numerator(upper);
            rational den = denominator(upper);
            rational maxval = num*den;
            unsigned bv_size = maxval.get_num_bits();
            zero = bv.mk_numeral(rational(0), bv_size);
            for (unsigned i = 0; i < soft.size(); ++i) {
                pb2bv(soft[i], tmp);
                es.push_back(m.mk_ite(tmp, bv.mk_numeral(den*weights[i], bv_size), zero));
            }
            if (es.empty()) {
                objective = bv.mk_numeral(0, bv_size);
            }
            else {
                objective = es[0].get();
                for (unsigned i = 1; i < es.size(); ++i) {
                    objective = bv.mk_bv_add(objective, es[i].get());
                }
            }
            return objective;
        }
    protected:
        typedef bvsls_opt_engine::optimization_result opt_result;

        // delegate the satisfiability check; on SAT, run SLS to improve
        // the model before it is handed back via get_model
        virtual lbool check_sat_core(unsigned num_assumptions, expr * const * assumptions) {
            lbool r = m_solver->check_sat(num_assumptions, assumptions);
            if (r == l_true) {
                m_solver->get_model(m_model);
                opt(m_model);
            }
            return r;
        }
        virtual void push_core() {
            m_solver->push();
        }
        virtual void pop_core(unsigned n) {
            m_solver->pop(n);
        }
    private:
        // convert soft constraints to bit-vector objective.
        // Copy the wrapped solver's assertions into the bvsls engine:
        // PB/cardinality constraints are rewritten to bit-vectors and the
        // result is normalized with an NNF tactic first.
        void assertions2sls() {
            expr_ref tmp(m);
            goal_ref g(alloc(goal, m, true, false));
            for (unsigned i = 0; i < m_solver->get_num_assertions(); ++i) {
                m_pb2bv(m_solver->get_assertion(i), tmp);
                g->assert_expr(tmp);
            }
            tactic_ref simplify = mk_nnf_tactic(m);
            proof_converter_ref pc;
            expr_dependency_ref core(m);
            goal_ref_buffer result;
            model_converter_ref model_converter;
            (*simplify)(g, result, model_converter, pc, core);
            SASSERT(result.size() == 1);
            goal* r = result[0];
            for (unsigned i = 0; i < r->size(); ++i) {
                m_bvsls->assert_expr(r->form(i));
            }
        }

        // pseudo-Boolean SLS: feed hard assertions and weighted soft
        // constraints to pb_sls, seed it with mdl, and return its model
        void pbsls_opt(model_ref& mdl) {
            // allocation/reset is inside the same critical section used by
            // set_cancel, so a concurrent cancel sees a consistent pointer
            #pragma omp critical (sls_solver)
            {
                if (m_pbsls) {
                    m_pbsls->reset();
                }
                else {
                    m_pbsls = alloc(smt::pb_sls, m);
                }
            }
            m_pbsls->set_model(mdl);
            m_pbsls->updt_params(m_params);
            for (unsigned i = 0; i < m_solver->get_num_assertions(); ++i) {
                m_pbsls->add(m_solver->get_assertion(i));
            }
            for (unsigned i = 0; i < m_soft.size(); ++i) {
                m_pbsls->add(m_soft[i].get(), m_weights[i]);
            }
            (*m_pbsls.get())();
            m_pbsls->get_model(m_model);
            mdl = m_model.get();
        }

        // bit-vector SLS: translate assertions and the weighted soft
        // objective to bit-vector form and maximize with bvsls_opt_engine
        void bvsls_opt(model_ref& mdl) {
            #pragma omp critical (sls_solver)
            {
                m_bvsls = alloc(bvsls_opt_engine, m, m_params);
            }
            assertions2sls();
            expr_ref objective = soft2bv(m_soft, m_weights);
            opt_result res(m);
            res.is_sat = l_undef;
            try {
                res = m_bvsls->optimize(objective, mdl, true);
            }
            catch (...) {
                // best effort: on engine failure keep the original model
            }
            SASSERT(res.is_sat == l_true || res.is_sat == l_undef);
            if (res.is_sat == l_true) {
                m_bvsls->get_model(m_model);
                mdl = m_model.get();
            }
        }
    };
}

#endif
z_solve.c
//-------------------------------------------------------------------------// // // // This benchmark is an OpenMP C version of the NPB SP code. This OpenMP // // C version is developed by the Center for Manycore Programming at Seoul // // National University and derived from the OpenMP Fortran versions in // // "NPB3.3-OMP" developed by NAS. // // // // Permission to use, copy, distribute and modify this software for any // // purpose with or without fee is hereby granted. This software is // // provided "as is" without express or implied warranty. // // // // Information on NPB 3.3, including the technical report, the original // // specifications, source code, results and information on how to submit // // new results, is available at: // // // // http://www.nas.nasa.gov/Software/NPB/ // // // // Send comments or suggestions for this OpenMP C version to // // cmp@aces.snu.ac.kr // // // // Center for Manycore Programming // // School of Computer Science and Engineering // // Seoul National University // // Seoul 151-744, Korea // // // // E-mail: cmp@aces.snu.ac.kr // // // //-------------------------------------------------------------------------// //-------------------------------------------------------------------------// // Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, // // and Jaejin Lee // //-------------------------------------------------------------------------// #include "header.h" //--------------------------------------------------------------------- // this function performs the solution of the approximate factorization // step in the z-direction for all five matrix components // simultaneously. The Thomas algorithm is employed to solve the // systems for the z-lines. 
Boundary conditions are non-periodic //--------------------------------------------------------------------- void z_solve() { int i, j, k, k1, k2, m; double ru1, fac1, fac2; //kai // int k15; // consistent_data(&k15, "int", 1); //--------------------------------------------------------------------- // Prepare for z-solve, array redistribution //--------------------------------------------------------------------- if (timeron) timer_start(t_zsolve); #pragma omp parallel for default(shared) private(i,j,k,k1,k2,m, \ ru1,fac1,fac2) for (j = k15+1; j <= ny2; j++) { lhsinitj(nz2+1, nx2); //--------------------------------------------------------------------- // Computes the left hand side for the three z-factors //--------------------------------------------------------------------- //--------------------------------------------------------------------- // first fill the lhs for the u-eigenvalue //--------------------------------------------------------------------- for (i = 1; i <= nx2; i++) { for (k = 0; k <= nz2+1; k++) { ru1 = c3c4*rho_i[k][j][i]; cv[k] = ws[k][j][i]; rhos[k] = max(max(dz4+con43*ru1, dz5+c1c5*ru1), max(dzmax+ru1, dz1)); } for (k = 1; k <= nz2; k++) { lhs[k][i][0] = 0.0; lhs[k][i][1] = -dttz2 * cv[k-1] - dttz1 * rhos[k-1]; lhs[k][i][2] = 1.0 + c2dttz1 * rhos[k]; lhs[k][i][3] = dttz2 * cv[k+1] - dttz1 * rhos[k+1]; lhs[k][i][4] = 0.0; } } //--------------------------------------------------------------------- // add fourth order dissipation //--------------------------------------------------------------------- for (i = 1; i <= nx2; i++) { k = 1; lhs[k][i][2] = lhs[k][i][2] + comz5; lhs[k][i][3] = lhs[k][i][3] - comz4; lhs[k][i][4] = lhs[k][i][4] + comz1; k = 2; lhs[k][i][1] = lhs[k][i][1] - comz4; lhs[k][i][2] = lhs[k][i][2] + comz6; lhs[k][i][3] = lhs[k][i][3] - comz4; lhs[k][i][4] = lhs[k][i][4] + comz1; } for (k = 3; k <= nz2-2; k++) { for (i = 1; i <= nx2; i++) { lhs[k][i][0] = lhs[k][i][0] + comz1; lhs[k][i][1] = lhs[k][i][1] - comz4; lhs[k][i][2] 
= lhs[k][i][2] + comz6; lhs[k][i][3] = lhs[k][i][3] - comz4; lhs[k][i][4] = lhs[k][i][4] + comz1; } } for (i = 1; i <= nx2; i++) { k = nz2-1; lhs[k][i][0] = lhs[k][i][0] + comz1; lhs[k][i][1] = lhs[k][i][1] - comz4; lhs[k][i][2] = lhs[k][i][2] + comz6; lhs[k][i][3] = lhs[k][i][3] - comz4; k = nz2; lhs[k][i][0] = lhs[k][i][0] + comz1; lhs[k][i][1] = lhs[k][i][1] - comz4; lhs[k][i][2] = lhs[k][i][2] + comz5; } //--------------------------------------------------------------------- // subsequently, fill the other factors (u+c), (u-c) //--------------------------------------------------------------------- for (k = 1; k <= nz2; k++) { for (i = 1; i <= nx2; i++) { lhsp[k][i][0] = lhs[k][i][0]; lhsp[k][i][1] = lhs[k][i][1] - dttz2 * speed[k-1][j][i]; lhsp[k][i][2] = lhs[k][i][2]; lhsp[k][i][3] = lhs[k][i][3] + dttz2 * speed[k+1][j][i]; lhsp[k][i][4] = lhs[k][i][4]; lhsm[k][i][0] = lhs[k][i][0]; lhsm[k][i][1] = lhs[k][i][1] + dttz2 * speed[k-1][j][i]; lhsm[k][i][2] = lhs[k][i][2]; lhsm[k][i][3] = lhs[k][i][3] - dttz2 * speed[k+1][j][i]; lhsm[k][i][4] = lhs[k][i][4]; } } //--------------------------------------------------------------------- // FORWARD ELIMINATION //--------------------------------------------------------------------- for (k = 0; k <= grid_points[2]-3; k++) { k1 = k + 1; k2 = k + 2; for (i = 1; i <= nx2; i++) { fac1 = 1.0/lhs[k][i][2]; lhs[k][i][3] = fac1*lhs[k][i][3]; lhs[k][i][4] = fac1*lhs[k][i][4]; for (m = 0; m < 3; m++) { rhs[k][j][i][m] = fac1*rhs[k][j][i][m]; } lhs[k1][i][2] = lhs[k1][i][2] - lhs[k1][i][1]*lhs[k][i][3]; lhs[k1][i][3] = lhs[k1][i][3] - lhs[k1][i][1]*lhs[k][i][4]; for (m = 0; m < 3; m++) { rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhs[k1][i][1]*rhs[k][j][i][m]; } lhs[k2][i][1] = lhs[k2][i][1] - lhs[k2][i][0]*lhs[k][i][3]; lhs[k2][i][2] = lhs[k2][i][2] - lhs[k2][i][0]*lhs[k][i][4]; for (m = 0; m < 3; m++) { rhs[k2][j][i][m] = rhs[k2][j][i][m] - lhs[k2][i][0]*rhs[k][j][i][m]; } } } 
//--------------------------------------------------------------------- // The last two rows in this grid block are a bit different, // since they for (not have two more rows available for the // elimination of off-diagonal entries //--------------------------------------------------------------------- k = grid_points[2]-2; k1 = grid_points[2]-1; for (i = 1; i <= nx2; i++) { fac1 = 1.0/lhs[k][i][2]; lhs[k][i][3] = fac1*lhs[k][i][3]; lhs[k][i][4] = fac1*lhs[k][i][4]; for (m = 0; m < 3; m++) { rhs[k][j][i][m] = fac1*rhs[k][j][i][m]; } lhs[k1][i][2] = lhs[k1][i][2] - lhs[k1][i][1]*lhs[k][i][3]; lhs[k1][i][3] = lhs[k1][i][3] - lhs[k1][i][1]*lhs[k][i][4]; for (m = 0; m < 3; m++) { rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhs[k1][i][1]*rhs[k][j][i][m]; } //--------------------------------------------------------------------- // scale the last row immediately //--------------------------------------------------------------------- fac2 = 1.0/lhs[k1][i][2]; for (m = 0; m < 3; m++) { rhs[k1][j][i][m] = fac2*rhs[k1][j][i][m]; } } //--------------------------------------------------------------------- // for (the u+c and the u-c factors //--------------------------------------------------------------------- for (k = 0; k <= grid_points[2]-3; k++) { k1 = k + 1; k2 = k + 2; for (i = 1; i <= nx2; i++) { m = 3; fac1 = 1.0/lhsp[k][i][2]; lhsp[k][i][3] = fac1*lhsp[k][i][3]; lhsp[k][i][4] = fac1*lhsp[k][i][4]; rhs[k][j][i][m] = fac1*rhs[k][j][i][m]; lhsp[k1][i][2] = lhsp[k1][i][2] - lhsp[k1][i][1]*lhsp[k][i][3]; lhsp[k1][i][3] = lhsp[k1][i][3] - lhsp[k1][i][1]*lhsp[k][i][4]; rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsp[k1][i][1]*rhs[k][j][i][m]; lhsp[k2][i][1] = lhsp[k2][i][1] - lhsp[k2][i][0]*lhsp[k][i][3]; lhsp[k2][i][2] = lhsp[k2][i][2] - lhsp[k2][i][0]*lhsp[k][i][4]; rhs[k2][j][i][m] = rhs[k2][j][i][m] - lhsp[k2][i][0]*rhs[k][j][i][m]; m = 4; fac1 = 1.0/lhsm[k][i][2]; lhsm[k][i][3] = fac1*lhsm[k][i][3]; lhsm[k][i][4] = fac1*lhsm[k][i][4]; rhs[k][j][i][m] = fac1*rhs[k][j][i][m]; 
lhsm[k1][i][2] = lhsm[k1][i][2] - lhsm[k1][i][1]*lhsm[k][i][3]; lhsm[k1][i][3] = lhsm[k1][i][3] - lhsm[k1][i][1]*lhsm[k][i][4]; rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsm[k1][i][1]*rhs[k][j][i][m]; lhsm[k2][i][1] = lhsm[k2][i][1] - lhsm[k2][i][0]*lhsm[k][i][3]; lhsm[k2][i][2] = lhsm[k2][i][2] - lhsm[k2][i][0]*lhsm[k][i][4]; rhs[k2][j][i][m] = rhs[k2][j][i][m] - lhsm[k2][i][0]*rhs[k][j][i][m]; } } //--------------------------------------------------------------------- // And again the last two rows separately //--------------------------------------------------------------------- k = grid_points[2]-2; k1 = grid_points[2]-1; for (i = 1; i <= nx2; i++) { m = 3; fac1 = 1.0/lhsp[k][i][2]; lhsp[k][i][3] = fac1*lhsp[k][i][3]; lhsp[k][i][4] = fac1*lhsp[k][i][4]; rhs[k][j][i][m] = fac1*rhs[k][j][i][m]; lhsp[k1][i][2] = lhsp[k1][i][2] - lhsp[k1][i][1]*lhsp[k][i][3]; lhsp[k1][i][3] = lhsp[k1][i][3] - lhsp[k1][i][1]*lhsp[k][i][4]; rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsp[k1][i][1]*rhs[k][j][i][m]; m = 4; fac1 = 1.0/lhsm[k][i][2]; lhsm[k][i][3] = fac1*lhsm[k][i][3]; lhsm[k][i][4] = fac1*lhsm[k][i][4]; rhs[k][j][i][m] = fac1*rhs[k][j][i][m]; lhsm[k1][i][2] = lhsm[k1][i][2] - lhsm[k1][i][1]*lhsm[k][i][3]; lhsm[k1][i][3] = lhsm[k1][i][3] - lhsm[k1][i][1]*lhsm[k][i][4]; rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsm[k1][i][1]*rhs[k][j][i][m]; //--------------------------------------------------------------------- // Scale the last row immediately (some of this is overkill // if this is the last cell) //--------------------------------------------------------------------- rhs[k1][j][i][3] = rhs[k1][j][i][3]/lhsp[k1][i][2]; rhs[k1][j][i][4] = rhs[k1][j][i][4]/lhsm[k1][i][2]; } //--------------------------------------------------------------------- // BACKSUBSTITUTION //--------------------------------------------------------------------- k = grid_points[2]-2; k1 = grid_points[2]-1; for (i = 1; i <= nx2; i++) { for (m = 0; m < 3; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - 
lhs[k][i][3]*rhs[k1][j][i][m]; } rhs[k][j][i][3] = rhs[k][j][i][3] - lhsp[k][i][3]*rhs[k1][j][i][3]; rhs[k][j][i][4] = rhs[k][j][i][4] - lhsm[k][i][3]*rhs[k1][j][i][4]; } //--------------------------------------------------------------------- // Whether or not this is the last processor, we always have // to complete the back-substitution //--------------------------------------------------------------------- //--------------------------------------------------------------------- // The first three factors //--------------------------------------------------------------------- for (k = grid_points[2]-3; k >= 0; k--) { k1 = k + 1; k2 = k + 2; for (i = 1; i <= nx2; i++) { for (m = 0; m < 3; m++) { rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[k][i][3]*rhs[k1][j][i][m] - lhs[k][i][4]*rhs[k2][j][i][m]; } //------------------------------------------------------------------- // And the remaining two //------------------------------------------------------------------- rhs[k][j][i][3] = rhs[k][j][i][3] - lhsp[k][i][3]*rhs[k1][j][i][3] - lhsp[k][i][4]*rhs[k2][j][i][3]; rhs[k][j][i][4] = rhs[k][j][i][4] - lhsm[k][i][3]*rhs[k1][j][i][4] - lhsm[k][i][4]*rhs[k2][j][i][4]; } } //kai k15 = 0; } if (timeron) timer_stop(t_zsolve); tzetar(); }
variable_transfer_utility.h
/* ============================================================================== KratosStructuralApplication A library based on: Kratos A General Purpose Software for Multi-Physics Finite Element Analysis Version 1.0 (Released on march 05, 2007). Copyright 2007 Pooyan Dadvand, Riccardo Rossi, Janosch Stascheit, Felix Nagel pooyan@cimne.upc.edu rrossi@cimne.upc.edu janosch.stascheit@rub.de nagel@sd.rub.de - CIMNE (International Center for Numerical Methods in Engineering), Gran Capita' s/n, 08034 Barcelona, Spain - Ruhr-University Bochum, Institute for Structural Mechanics, Germany Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following condition: Distribution of this code for any commercial purpose is permissible ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS. The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
============================================================================== */ /* ********************************************************* * * Last Modified by: $Author: nelson $ * Date: $Date: 2009-01-21 09:56:09 $ * Revision: $Revision: 1.12 $ * * ***********************************************************/ #if !defined(KRATOS_VARIABLE_TRANSFER_UTILITY_INCLUDED ) #define KRATOS_VARIABLE_TRANSFER_UTILITY_INCLUDED //System includes #ifdef _OPENMP #include <omp.h> #endif //External includes #include "boost/smart_ptr.hpp" #include "boost/timer.hpp" #include "boost/progress.hpp" //Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/variables.h" #include "containers/array_1d.h" #include "includes/element.h" #include "integration/integration_point.h" #include "geometries/geometry.h" #include "linear_solvers/skyline_lu_factorization_solver.h" #include "spaces/ublas_space.h" #include "geometries/hexahedra_3d_8.h" #include "geometries/tetrahedra_3d_4.h" #include "structural_application.h" namespace Kratos { class VariableTransferUtility { public: typedef Dof<double> TDofType; typedef PointerVectorSet<TDofType, IndexedObject> DofsArrayType; typedef ModelPart::ElementsContainerType ElementsArrayType; typedef double* ContainerType; typedef Element::DofsVectorType DofsVectorType; typedef Geometry<Node<3> >::IntegrationPointsArrayType IntegrationPointsArrayType; typedef Geometry<Node<3> >::GeometryType GeometryType; typedef Geometry<Node<3> >::CoordinatesArrayType CoordinatesArrayType; typedef UblasSpace<double, CompressedMatrix, Vector> SpaceType; typedef UblasSpace<double, Matrix, Vector> DenseSpaceType; typedef LinearSolver<SpaceType, DenseSpaceType> LinearSolverType; /** * Constructor. 
*/ VariableTransferUtility() { mpLinearSolver = LinearSolverType::Pointer(new SkylineLUFactorizationSolver<SpaceType, DenseSpaceType>()); std::cout << "VariableTransferUtility created" << std::endl; mEchoLevel = 0; } VariableTransferUtility(LinearSolverType::Pointer pLinearSolver) { mpLinearSolver = pLinearSolver; std::cout << "VariableTransferUtility created" << std::endl; mEchoLevel = 0; } /** * Destructor. */ virtual ~VariableTransferUtility() {} void SetEchoLevel(int Level) { mEchoLevel = Level; } int GetEchoLevel() { return mEchoLevel; } /** * Initializes elements of target model part. * @param rTarget new/target model part * KLUDGE: new model part instance is not automatically initialized */ void InitializeModelPart( ModelPart& rTarget ) { for( ModelPart::ElementIterator it = rTarget.ElementsBegin(); it!= rTarget.ElementsEnd(); it++ ) { (*it).Initialize(); } } /** * Transfer of nodal solution step variables. * This Transfers all solution step variables from r_old_model_part * to r_new_model_part. * To cope with moved meshes, the source model_part is resetted to its * reference configuration temporarily! 
 * @param r_old_model_part source model_part
 * @param r_new_model_part target model_part
 * TODO: find more elegant way to check existence of variables in each node
 */
void TransferNodalVariables(ModelPart& rSource, ModelPart& rTarget)
{
    //reset source model part to reference configuration
    // (mapping is done in the undeformed configuration of both meshes)
    for( ModelPart::NodeIterator it = rSource.NodesBegin() ;
            it != rSource.NodesEnd(); it++ )
    {
        (*it).X() = (*it).X0();
        (*it).Y() = (*it).Y0();
        (*it).Z() = (*it).Z0();
    }
    //reset target model part to reference configuration
    for( ModelPart::NodeIterator it = rTarget.NodesBegin() ;
            it != rTarget.NodesEnd(); it++ )
    {
        (*it).X() = (*it).X0();
        (*it).Y() = (*it).Y0();
        (*it).Z() = (*it).Z0();
    }
    //time_target= time_source
    ProcessInfo SourceCurrentProcessInfo= rSource.GetProcessInfo();
    rTarget.CloneTimeStep(SourceCurrentProcessInfo[TIME]);
    ElementsArrayType& OldMeshElementsArray= rSource.Elements();
    Element::Pointer correspondingElement;
//  FixDataValueContainer newNodalValues;
//  FixDataValueContainer oldNodalValues;
    Point localPoint;

    // For every target node: locate the source element containing it, then
    // interpolate the nodal solution step variables at that local point.
    for(ModelPart::NodeIterator it = rTarget.NodesBegin() ;
            it != rTarget.NodesEnd() ; it++)
    {
        if(FindPartnerElement(*(it), OldMeshElementsArray,
                              correspondingElement,localPoint))
        {
            //TransferVariables from Old Mesh to new Node
            // Only transfer quantities the node actually carries DOFs for,
            // to avoid creating variables on nodes that do not use them.
            if(it->HasDofFor(DISPLACEMENT_X)
                    || it->HasDofFor(DISPLACEMENT_Y)
                    || it->HasDofFor(DISPLACEMENT_Z))
            {
                // NULL/EINS suffixes are the old/new time-step storages of the
                // time integration scheme — assumed Newmark-type; TODO confirm.
                noalias(it->GetSolutionStepValue(DISPLACEMENT_NULL))=
                    MappedValue(*correspondingElement, localPoint,DISPLACEMENT_NULL );
                noalias(it->GetSolutionStepValue(DISPLACEMENT_EINS))=
                    MappedValue(*correspondingElement, localPoint,DISPLACEMENT_EINS );
                noalias(it->GetSolutionStepValue(DISPLACEMENT_NULL_DT))=
                    MappedValue(*correspondingElement, localPoint,DISPLACEMENT_NULL_DT );
                noalias(it->GetSolutionStepValue(ACCELERATION_NULL))=
                    MappedValue(*correspondingElement, localPoint,ACCELERATION_NULL );
                noalias(it->GetSolutionStepValue(DISPLACEMENT_OLD))=
                    MappedValue(*correspondingElement, localPoint,DISPLACEMENT_OLD );
            }
            if(it->HasDofFor(WATER_PRESSURE))
            {
                it->GetSolutionStepValue(WATER_PRESSURE_NULL)=
                    MappedValuePressure(*correspondingElement, localPoint, WATER_PRESSURE_NULL);
                it->GetSolutionStepValue(WATER_PRESSURE_EINS)=
                    MappedValuePressure(*correspondingElement, localPoint, WATER_PRESSURE_EINS);
                it->GetSolutionStepValue(WATER_PRESSURE_NULL_DT)=
                    MappedValuePressure(*correspondingElement, localPoint, WATER_PRESSURE_NULL_DT);
                it->GetSolutionStepValue(WATER_PRESSURE_NULL_ACCELERATION)=
                    MappedValuePressure(*correspondingElement, localPoint, WATER_PRESSURE_NULL_ACCELERATION);
            }
            if(it->HasDofFor(AIR_PRESSURE))
            {
                it->GetSolutionStepValue(AIR_PRESSURE_NULL)=
                    MappedValuePressure(*correspondingElement, localPoint, AIR_PRESSURE_NULL);
                it->GetSolutionStepValue(AIR_PRESSURE_EINS)=
                    MappedValuePressure(*correspondingElement, localPoint, AIR_PRESSURE_EINS);
                it->GetSolutionStepValue(AIR_PRESSURE_NULL_DT)=
                    MappedValuePressure(*correspondingElement, localPoint, AIR_PRESSURE_NULL_DT);
                it->GetSolutionStepValue(AIR_PRESSURE_NULL_ACCELERATION)=
                    MappedValuePressure(*correspondingElement, localPoint, AIR_PRESSURE_NULL_ACCELERATION);
            }
            // NOTE(review): unconditional per-node console output — consider
            // guarding with mEchoLevel.
            std::cout <<"VARIABLES TRANSFERRED" << std::endl;
        }
        else
        {
            std::cout<<"###### NO PARTNER FOUND IN OLD MESH : TransferNodalVariables(...)#####"<<std::endl;
        }
    }
    //restore source model_part
    // (move nodes back to their current deformed position)
    for( ModelPart::NodeIterator it = rSource.NodesBegin() ;
            it != rSource.NodesEnd(); it++ )
    {
        (*it).X() = (*it).X0()+(*it).GetSolutionStepValue( DISPLACEMENT_X );
        (*it).Y() = (*it).Y0()+(*it).GetSolutionStepValue( DISPLACEMENT_Y );
        (*it).Z() = (*it).Z0()+(*it).GetSolutionStepValue( DISPLACEMENT_Z );
    }
    //restore target model_part
    for( ModelPart::NodeIterator it = rTarget.NodesBegin() ;
            it != rTarget.NodesEnd(); it++ )
    {
        (*it).X() = (*it).X0()+(*it).GetSolutionStepValue( DISPLACEMENT_X );
        (*it).Y() = (*it).Y0()+(*it).GetSolutionStepValue( DISPLACEMENT_Y );
        (*it).Z() = (*it).Z0()+(*it).GetSolutionStepValue( DISPLACEMENT_Z );
    }
}

/**
 * Transfer of PRESTRESS.
 * This transfers the in-situ stress from rSource to rTarget.
* @param rSource the source model part * @param rTarget the target model part */ void TransferPrestress( ModelPart& rSource, ModelPart& rTarget ) { TransferSpecificVariable( rSource, rTarget, PRESTRESS ); } /** * Transfer of PRESTRESS. * This transfers the in-situ stress from rSource to rTarget. * rSource and rTarget must be identical. Otherwise it will generate errors. * @param rSource the source model part * @param rTarget the target model part */ void TransferPrestressIdentically( ModelPart& rSource, ModelPart& rTarget ) { std::vector<Vector> PreStresses; for( ModelPart::ElementIterator it = rSource.ElementsBegin(); it != rSource.ElementsEnd(); ++it ) { it->GetValueOnIntegrationPoints(PRESTRESS, PreStresses, rSource.GetProcessInfo()); rTarget.Elements()[it->Id()].SetValueOnIntegrationPoints(PRESTRESS, PreStresses, rTarget.GetProcessInfo()); } } /** * Transfer of INSITU_STRESS. * This transfers the in-situ stress from rSource to rTarget. * @param rSource the source model part * @param rTarget the target model part */ void TransferInSituStress( ModelPart& rSource, ModelPart& rTarget ) { TransferSpecificVariable( rSource, rTarget, INSITU_STRESS ); } /** * Transfer of INSITU_STRESS. * This transfers the in-situ stress from rSource to rTarget. 
 * @param rSource the source model part
 * @param rTarget the target model part
 */
void TransferSpecificVariable( ModelPart& rSource, ModelPart& rTarget, Variable<Vector>& rThisVariable )
{
    boost::timer timer1;
//  std::cout << "line 243" << std::endl;
    //reset original model part to reference configuration
    // (the projection is carried out in the undeformed configuration)
    for( ModelPart::NodeIterator it = rSource.NodesBegin() ;
            it != rSource.NodesEnd(); it++ )
    {
        (*it).X() = (*it).X0();
        (*it).Y() = (*it).Y0();
        (*it).Z() = (*it).Z0();
    }
//  std::cout << "line 253" << std::endl;
    for( ModelPart::NodeIterator it = rTarget.NodesBegin() ;
            it != rTarget.NodesEnd(); it++ )
    {
        (*it).X() = (*it).X0();
        (*it).Y() = (*it).Y0();
        (*it).Z() = (*it).Z0();
    }
    std::cout << "time for resetting to reference configuration: " << timer1.elapsed() << std::endl;
    timer1.restart();
//  std::cout << "line 263" << std::endl;
    // Step 1: smooth the Gauss point values of the source mesh onto its nodes
    // (L2-projection).
    TransferVariablesToNodes(rSource, rThisVariable);
    std::cout << "time for transferring GP variables to nodes: " << timer1.elapsed() << std::endl;
    timer1.restart();
//  TransferVariablesBetweenMeshes(rSource, rTarget,INSITU_STRESS);
//  std::cout << "line 268" << std::endl;
//  TransferVariablesToGaussPoints(rTarget, INSITU_STRESS);
    // Step 2: interpolate the smoothed nodal field at the Gauss points of the
    // target mesh.
    TransferVariablesToGaussPoints(rSource, rTarget, rThisVariable );
    std::cout << "time for transferring variables to gauss points: " << timer1.elapsed() << std::endl;
    timer1.restart();
    //restore model_part
//  std::cout << "line 272" << std::endl;
    for( ModelPart::NodeIterator it = rSource.NodesBegin() ;
            it != rSource.NodesEnd(); it++ )
    {
        (*it).X() = (*it).X0()+(*it).GetSolutionStepValue( DISPLACEMENT_X );
        (*it).Y() = (*it).Y0()+(*it).GetSolutionStepValue( DISPLACEMENT_Y );
        (*it).Z() = (*it).Z0()+(*it).GetSolutionStepValue( DISPLACEMENT_Z );
    }
    for( ModelPart::NodeIterator it = rTarget.NodesBegin() ;
            it != rTarget.NodesEnd(); it++ )
    {
        (*it).X() = (*it).X0()+(*it).GetSolutionStepValue( DISPLACEMENT_X );
        (*it).Y() = (*it).Y0()+(*it).GetSolutionStepValue( DISPLACEMENT_Y );
        (*it).Z() = (*it).Z0()+(*it).GetSolutionStepValue( DISPLACEMENT_Z );
    }
    std::cout << "time for restoring model part: " << timer1.elapsed() << std::endl;
//  std::cout << "line 290" << std::endl;
}

/**
 * Transfer of integration point variables.
 * This Transfers all variables on integration points from r_old_model_part
 * to r_new_model_part.
 * To cope with moved meshes, the source model_part is resetted to its
 * reference configuration temporarily!
 * @param r_old_model_part source model_part
 * @param r_new_model_part target model_part
 * TODO: find more elegant way to check existence of variables in each node
 * CAUTION: THIS MAY CREATE VARIABLES ON NODES THAT MIGHT CAUSE A SEGMENTATION
 * FAULT ON RUNTIME
 */
void TransferConstitutiveLawVariables(ModelPart& rSource, ModelPart& rTarget)
{
    //reset source model part to reference configuration
    for( ModelPart::NodeIterator it = rSource.NodesBegin() ;
            it != rSource.NodesEnd(); it++ )
    {
        (*it).X() = (*it).X0();
        (*it).Y() = (*it).Y0();
        (*it).Z() = (*it).Z0();
    }
    //reset target model part to reference configuration
    for( ModelPart::NodeIterator it = rTarget.NodesBegin() ;
            it != rTarget.NodesEnd(); it++ )
    {
        (*it).X() = (*it).X0();
        (*it).Y() = (*it).Y0();
        (*it).Z() = (*it).Z0();
    }
    // Smooth ELASTIC_LEFT_CAUCHY_GREEN_OLD to the source nodes, then
    // interpolate it at the Gauss points of the target mesh.
    TransferVariablesToNodes(rSource, ELASTIC_LEFT_CAUCHY_GREEN_OLD);
//  TransferVariablesBetweenMeshes(rSource, rTarget,ELASTIC_LEFT_CAUCHY_GREEN_OLD);
//  TransferVariablesToGaussPoints(rTarget, ELASTIC_LEFT_CAUCHY_GREEN_OLD);
    TransferVariablesToGaussPoints( rSource, rTarget, ELASTIC_LEFT_CAUCHY_GREEN_OLD);
    // restore source model_part
    for( ModelPart::NodeIterator it = rSource.NodesBegin() ;
            it != rSource.NodesEnd(); it++ )
    {
        (*it).X() = (*it).X0()+(*it).GetSolutionStepValue( DISPLACEMENT_X );
        (*it).Y() = (*it).Y0()+(*it).GetSolutionStepValue( DISPLACEMENT_Y );
        (*it).Z() = (*it).Z0()+(*it).GetSolutionStepValue( DISPLACEMENT_Z );
    }
    // restore target model_part
    for( ModelPart::NodeIterator it = rTarget.NodesBegin() ;
            it != rTarget.NodesEnd(); it++ )
    {
        (*it).X() = (*it).X0()+(*it).GetSolutionStepValue( DISPLACEMENT_X );
        (*it).Y() = (*it).Y0()+(*it).GetSolutionStepValue( DISPLACEMENT_Y );
        (*it).Z() = (*it).Z0()+(*it).GetSolutionStepValue( DISPLACEMENT_Z );
    }
}

/**
 * Transfer of rThisVariable stored on nodes to integration point via
 * approximation by shape functions
 * @param model_part model_part on which the transfer should be done
 * @param rThisVariable Matrix-Variable which should be transferred
 * @see TransferVariablesToGaussPoints(ModelPart& model_part, Variable<Kratos::Vector>& rThisVariable)
 * @see TransferVariablesToGaussPoints(ModelPart& model_part, Variable<double>& rThisVariable)
 */
void TransferVariablesToGaussPoints(ModelPart& model_part, Variable<Kratos::Matrix>& rThisVariable)
{
    ElementsArrayType& ElementsArray= model_part.Elements();

    for( ElementsArrayType::ptr_iterator it = ElementsArray.ptr_begin();
            it != ElementsArray.ptr_end(); ++it )
    {
        const IntegrationPointsArrayType& integration_points
            = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());

        std::vector<Matrix> ValuesOnIntPoint(integration_points.size());

        const Matrix& Ncontainer =
            (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());

        for( unsigned int PointNumber = 0;
                PointNumber<integration_points.size(); PointNumber++)
        {
            // Interpolate the nodal 3x3 matrix values at the integration
            // point with the element shape functions.
            ValuesOnIntPoint[PointNumber].resize(3,3,false);
            noalias(ValuesOnIntPoint[PointNumber])= ZeroMatrix(3,3);

            for(unsigned int node= 0; node< (*it)->GetGeometry().size(); node++)
            {
                ValuesOnIntPoint[PointNumber]
                    +=Ncontainer(PointNumber, node)*
                      (*it)->GetGeometry()[node].GetSolutionStepValue(rThisVariable);
            }
        }
        (*it)->SetValueOnIntegrationPoints( rThisVariable, ValuesOnIntPoint,
                                            model_part.GetProcessInfo());
    }
}

/**
 * Transfer of rThisVariable stored on nodes to integration point via
 * approximation by shape functions
 * @param model_part model_part on which the transfer should be done
 * @param rThisVariable Vector-Variable which should be transferred
 * @see TransferVariablesToGaussPoints(ModelPart& model_part, Variable<Kratos::Matrix>& rThisVariable)
 * @see
TransferVariablesToGaussPoints(ModelPart& model_part, Variable<double>& rThisVariable) */ void TransferVariablesToGaussPoints(ModelPart& model_part, Variable<Kratos::Vector>& rThisVariable) { ElementsArrayType& ElementsArray= model_part.Elements(); for( ElementsArrayType::ptr_iterator it = ElementsArray.ptr_begin(); it != ElementsArray.ptr_end(); ++it ) { // std::cout << "line 417" << std::endl; unsigned int NodesDispMin= 1; unsigned int NodesDispMax= (*it)->GetGeometry().size(); const IntegrationPointsArrayType& integration_points = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod()); std::vector<Vector> ValuesOnIntPoint(integration_points.size()); const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod()); for( unsigned int PointNumber = 0; PointNumber<integration_points.size(); PointNumber++) { ValuesOnIntPoint[PointNumber].resize(6,false); noalias(ValuesOnIntPoint[PointNumber])= ZeroVector(6); for(unsigned int node= NodesDispMin-1; node< NodesDispMax; node++) { ValuesOnIntPoint[PointNumber] +=Ncontainer(PointNumber, node)* (*it)->GetGeometry()[node].GetSolutionStepValue(rThisVariable); } } // std::cout << "line 444" << std::endl; (*it)->SetValueOnIntegrationPoints( rThisVariable, ValuesOnIntPoint, model_part.GetProcessInfo()); } // std::cout << "line 449" << std::endl; } /** * Transfer of rThisVariable stored on nodes to integration point via * approximation by shape functions * @param model_part model_part on which the transfer should be done * @param rThisVariable double-Variable which should be transferred * @see TransferVariablesToGaussPoints(ModelPart& model_part, Variable<Kratos::Matrix>& rThisVariable) * @see TransferVariablesToGaussPoints(ModelPart& model_part, Variable<Kratos::Vector>& rThisVariable) */ void TransferVariablesToGaussPoints(ModelPart& model_part, Variable<double>& rThisVariable) { ElementsArrayType& ElementsArray= model_part.Elements(); for( ElementsArrayType::ptr_iterator it = 
ElementsArray.ptr_begin(); it != ElementsArray.ptr_end(); ++it ) { unsigned int NodesDispMin= 1; unsigned int NodesDispMax= (*it)->GetGeometry().size(); const IntegrationPointsArrayType& integration_points = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod()); std::vector<double> ValuesOnIntPoint(integration_points.size()); const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod()); for( unsigned int PointNumber = 0; PointNumber<integration_points.size(); PointNumber++) { ValuesOnIntPoint[PointNumber]= 0.0; for(unsigned int node= NodesDispMin-1; node< NodesDispMax; node++) { ValuesOnIntPoint[PointNumber] +=Ncontainer(PointNumber, node)* (*it)->GetGeometry()[node].GetSolutionStepValue(rThisVariable); } } (*it)->SetValueOnIntegrationPoints( rThisVariable, ValuesOnIntPoint, model_part.GetProcessInfo()); } } /** * Transfer of rThisVariable stored on nodes in source mesh to integration point of target * mesh via approximation by shape functions * @param rSource * @param rTarget * @param rThisVariable Matrix-Variable which should be transferred * @see TransferVariablesToGaussPoints(ModelPart& source_model_part, * ModelPart& source_model_part, Variable<Kratos::Vector>& rThisVariable) * @see TransferVariablesToGaussPoints(ModelPart& source_model_part, * ModelPart& source_model_part, Variable<double>& rThisVariable) */ void TransferVariablesToGaussPoints(ModelPart& rSource, ModelPart& rTarget, Variable<Kratos::Matrix>& rThisVariable) { ElementsArrayType& SourceMeshElementsArray= rSource.Elements(); ElementsArrayType& TargetMeshElementsArray= rTarget.Elements(); for( ElementsArrayType::ptr_iterator it = TargetMeshElementsArray.ptr_begin(); it != TargetMeshElementsArray.ptr_end(); ++it ) { std::cout << (*it)->Id() << std::endl; const IntegrationPointsArrayType& integration_points = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod()); std::vector<Matrix> ValuesOnIntPoint(integration_points.size()); 
for(unsigned int point=0; point< integration_points.size(); point++) { Point sourceLocalPoint; Point targetLocalPoint; noalias(targetLocalPoint)= integration_points[point]; Point targetGlobalPoint; (*it)->GetGeometry().GlobalCoordinates(targetGlobalPoint,targetLocalPoint); Element::Pointer sourceElement; //Calculate Value of rVariable(firstvalue, secondvalue) in OldMesh if(FindPartnerElement(targetGlobalPoint, SourceMeshElementsArray, sourceElement,sourceLocalPoint)) { ValuesOnIntPoint[point].resize(3,3,false); ValuesOnIntPoint[point]= ZeroMatrix(3,3); ValuesOnIntPoint[point]= ValueMatrixInOldMesh(*sourceElement, sourceLocalPoint, rThisVariable ); } } (*it)->SetValueOnIntegrationPoints( rThisVariable, ValuesOnIntPoint, rTarget.GetProcessInfo()); } } /** * Transfer of rThisVariable stored on nodes in source mesh to integration point of target * mesh via approximation by shape functions * @param rSource * @param rTarget * @param rThisVariable Vector-Variable which should be transferred * @see TransferVariablesToGaussPoints(ModelPart& source_model_part, ModelPart& source_model_part, Variable<Kratos::Matrix>& rThisVariable) * @see TransferVariablesToGaussPoints(ModelPart& source_model_part, ModelPart& source_model_part, Variable<double>& rThisVariable) */ void TransferVariablesToGaussPoints(ModelPart& rSource, ModelPart& rTarget, Variable<Kratos::Vector>& rThisVariable) { std::cout << "At TransferVariablesToGaussPoints(" << rSource.Name() << "," << rTarget.Name() << ", Variable<Vector> " << rThisVariable.Name() << std::endl; ElementsArrayType& SourceMeshElementsArray= rSource.Elements(); ElementsArrayType& TargetMeshElementsArray= rTarget.Elements(); int number_of_threads = 1; vector<unsigned int> element_partition; #ifdef _OPENMP number_of_threads = omp_get_max_threads(); double start_transfer = omp_get_wtime(); #endif CreatePartition(number_of_threads, TargetMeshElementsArray.size(), element_partition); KRATOS_WATCH( number_of_threads ); KRATOS_WATCH( 
element_partition ); boost::progress_display show_progress( TargetMeshElementsArray.size() ); #ifdef _OPENMP #pragma omp parallel for #endif for(int k = 0; k < number_of_threads; ++k) { ElementsArrayType::ptr_iterator it_begin = TargetMeshElementsArray.ptr_begin() + element_partition[k]; ElementsArrayType::ptr_iterator it_end = TargetMeshElementsArray.ptr_begin() + element_partition[k+1]; for (ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it) { // KRATOS_WATCH((*it)->Id()) const IntegrationPointsArrayType& integration_points = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod()); std::vector<Vector> ValuesOnIntPoint(integration_points.size()); // KRATOS_WATCH(integration_points.size()) for(unsigned int point = 0; point< integration_points.size(); ++point) { Point sourceLocalPoint; Point targetLocalPoint; noalias(targetLocalPoint) = integration_points[point]; Point targetGlobalPoint; (*it)->GetGeometry().GlobalCoordinates(targetGlobalPoint,targetLocalPoint); // KRATOS_WATCH(targetGlobalPoint) Element::Pointer sourceElement; //Calculate Value of rVariable(firstvalue, secondvalue) in OldMesh ValuesOnIntPoint[point].resize(6, false); if(FindPartnerElement(targetGlobalPoint, SourceMeshElementsArray, sourceElement,sourceLocalPoint)) { // KRATOS_WATCH(sourceElement->Id()) noalias(ValuesOnIntPoint[point])= ValueVectorInOldMesh(*sourceElement, sourceLocalPoint, rThisVariable ); } } (*it)->SetValueOnIntegrationPoints( rThisVariable, ValuesOnIntPoint, rTarget.GetProcessInfo()); ++show_progress; } } #ifdef _OPENMP double stop_transfer = omp_get_wtime(); std::cout << "time: " << stop_transfer - start_transfer << std::endl; #endif } /** * Transfer of rThisVariable stored on nodes in source mesh to integration point of target * mesh via approximation by shape functions * @param rSource * @param rTarget * @param rThisVariable double-Variable which should be transferred * @see TransferVariablesToGaussPoints(ModelPart& source_model_part, ModelPart& 
source_model_part, Variable<Kratos::Matrix>& rThisVariable) * @see TransferVariablesToGaussPoints(ModelPart& source_model_part, ModelPart& source_model_part, Variable<Kratos::Vector>& rThisVariable) */ void TransferVariablesToGaussPoints(ModelPart& rSource, ModelPart& rTarget, Variable<double>& rThisVariable) { ElementsArrayType& SourceMeshElementsArray= rSource.Elements(); ElementsArrayType& TargetMeshElementsArray= rTarget.Elements(); for( ElementsArrayType::ptr_iterator it = TargetMeshElementsArray.ptr_begin(); it != TargetMeshElementsArray.ptr_end(); ++it ) { const IntegrationPointsArrayType& integration_points = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod()); std::vector<double> ValuesOnIntPoint(integration_points.size()); for(unsigned int point=0; point< integration_points.size(); point++) { Point sourceLocalPoint; Point targetLocalPoint; noalias(targetLocalPoint)= integration_points[point]; Point targetGlobalPoint; (*it)->GetGeometry().GlobalCoordinates(targetGlobalPoint,targetLocalPoint); Element::Pointer sourceElement; //Calculate Value of rVariable(firstvalue, secondvalue) in OldMesh if(FindPartnerElement(targetGlobalPoint, SourceMeshElementsArray, sourceElement,sourceLocalPoint)) { ValuesOnIntPoint[point]= MappedValue(*sourceElement, sourceLocalPoint, rThisVariable ); } } (*it)->SetValueOnIntegrationPoints( rThisVariable, ValuesOnIntPoint, rTarget.GetProcessInfo()); } } /** * Transfer of rThisVariable defined on integration points to corresponding * nodal values. 
 The transformation is done in a form that ensures a minimization
 * of L_2-norm error (/sum{rThisVariable- f(x)) whereas
 * f(x)= /sum{shape_func_i*rThisVariable_i}
 * @param model_part model_part on which the transfer should be done
 * @param rThisVariable Matrix-Variable which should be transferred
 * @see TransferVariablesToNodes(ModelPart& model_part, Variable<Kratos::Vector>& rThisVariable)
 * @see TransferVariablesToNodes(ModelPart& model_part, Variable<double>& rThisVariable)
 * @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
 * Journal for numer. meth. in eng. 61 (2004) 2402--2427
 * WARNING: this may cause segmentation faults as the respective variables
 * will be created on nodal level while they are originally intended to be
 * stored on integration points!
 */
void TransferVariablesToNodes(ModelPart& model_part, Variable<Kratos::Matrix>& rThisVariable)
{
    ElementsArrayType& ElementsArray= model_part.Elements();

    //loop over all master surfaces (global search)
    // Reset the nodal values before accumulating the projection result.
    for(ModelPart::NodeIterator it = model_part.NodesBegin();
            it != model_part.NodesEnd() ; it++)
    {
        it->GetSolutionStepValue(rThisVariable)
            = ZeroMatrix(3,3);
    }
    //SetUpEquationSystem
    // NOTE(review): the system indexes rows/columns with (node Id - 1), so
    // node Ids are assumed contiguous starting at 1 — TODO confirm.
    SpaceType::MatrixType M(model_part.NumberOfNodes(),model_part.NumberOfNodes());
    SpaceType::VectorType g(model_part.NumberOfNodes());
    SpaceType::VectorType b(model_part.NumberOfNodes());
    noalias(M)= ZeroMatrix(model_part.NumberOfNodes(),model_part.NumberOfNodes());

    // Assemble the (variable-independent) mass-like matrix M once.
    for( ElementsArrayType::ptr_iterator it = ElementsArray.ptr_begin();
            it != ElementsArray.ptr_end(); ++it )
    {
        const IntegrationPointsArrayType& integration_points
            = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());

        GeometryType::JacobiansType J(integration_points.size());
        J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());

        const Matrix& Ncontainer =
            (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());

        Matrix InvJ(3,3);
        double DetJ;

        for(unsigned int point=0; point< integration_points.size(); point++)
        {
            // InvJ is unused here; the inversion only provides DetJ for dV.
            MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);

            double dV= DetJ*integration_points[point].Weight();

            for(unsigned int prim=0; prim<(*it)->GetGeometry().size() ; prim++)
            {
                for(unsigned int sec=0; sec<(*it)->GetGeometry().size() ; sec++)
                {
                    M(((*it)->GetGeometry()[prim].Id()-1),
                      ((*it)->GetGeometry()[sec].Id()-1))+=
                          Ncontainer(point, prim)*Ncontainer(point, sec)*dV;
                }
            }
        }
    }
    // Solve one scalar L2-projection per matrix component (3x3 = 9 solves).
    for(unsigned int firstvalue=0; firstvalue<3; firstvalue++)
    {
        for(unsigned int secondvalue=0; secondvalue<3; secondvalue++)
        {
            noalias(g)= ZeroVector(model_part.NumberOfNodes());
            noalias(b)= ZeroVector(model_part.NumberOfNodes());
            //Transfer of GaussianVariables to Nodal Variablias via L_2-Minimization
            // see Jiao + Heath "Common-refinement-based data tranfer ..."
            // International Journal for numerical methods in engineering 61 (2004) 2402--2427
            // for general description of L_2-Minimization
            for( ElementsArrayType::ptr_iterator it = ElementsArray.ptr_begin();
                    it != ElementsArray.ptr_end(); ++it )
            {
                const IntegrationPointsArrayType& integration_points
                    = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());

                GeometryType::JacobiansType J(integration_points.size());
                J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());

                std::vector<Matrix> ValuesOnIntPoint(integration_points.size());

                (*it)->GetValueOnIntegrationPoints(rThisVariable, ValuesOnIntPoint,
                                                   model_part.GetProcessInfo());

                const Matrix& Ncontainer =
                    (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());

                Matrix InvJ(3,3);
                double DetJ;

                for(unsigned int point=0; point< integration_points.size(); point++)
                {
                    MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);

                    double dV= DetJ*integration_points[point].Weight();

                    for(unsigned int prim=0 ; prim<(*it)->GetGeometry().size(); prim++)
                    {
                        b(((*it)->GetGeometry()[prim].Id()-1))
                            +=(ValuesOnIntPoint[point](firstvalue,secondvalue))
                              *Ncontainer(point, prim)*dV;
                    }
                }
            }
            mpLinearSolver->Solve(M, g, b);

            // Scatter the solved component back to the nodes.
            for(ModelPart::NodeIterator it = model_part.NodesBegin() ;
                    it != model_part.NodesEnd() ; it++)
            {
                it->GetSolutionStepValue(rThisVariable)(firstvalue,secondvalue)
                    = g((it->Id()-1));
            }
        }//END secondvalue
    }//END firstvalue
}

/**
 * Transfer of rThisVariable defined on integration points to corresponding
 * nodal values. The transformation is done in a form that ensures a minimization
 * of L_2-norm error (/sum{rThisVariable- f(x)) whereas
 * f(x)= /sum{shape_func_i*rThisVariable_i}
 * @param model_part model_part on which the transfer should be done
 * @param rThisVariable Matrix-Variable which should be transferred
 * @see TransferVariablesToNodes(ModelPart& model_part, Variable<Kratos::Matrix>& rThisVariable)
 * @see TransferVariablesToNodes(ModelPart& model_part, Variable<double>& rThisVariable)
 * @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
 * Journal for numer. meth. in eng. 61 (2004) 2402--2427
 * WARNING: this may cause segmentation faults as the respective variables
 * will be created on nodal level while they are originally intended to be
 * stored on integration points!
*/ // serial version // void TransferVariablesToNodes(ModelPart& model_part, Variable<Kratos::Vector>& rThisVariable) // { // ElementsArrayType& ElementsArray= model_part.Elements(); // //loop over all master surfaces (global search) // for(ModelPart::NodeIterator it = model_part.NodesBegin(); // it != model_part.NodesEnd() ; it++) // { // it->GetSolutionStepValue(rThisVariable) // = ZeroVector(6); // } // //SetUpEquationSystem // SpaceType::MatrixType M(model_part.NumberOfNodes(),model_part.NumberOfNodes()); // SpaceType::VectorType g(model_part.NumberOfNodes()); // SpaceType::VectorType b(model_part.NumberOfNodes()); // noalias(M)= ZeroMatrix(model_part.NumberOfNodes(),model_part.NumberOfNodes()); // for( ElementsArrayType::ptr_iterator it = ElementsArray.ptr_begin(); // it != ElementsArray.ptr_end(); ++it ) // { // const IntegrationPointsArrayType& integration_points // = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod()); // GeometryType::JacobiansType J(integration_points.size()); // J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod()); // const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod()); // Matrix InvJ(3,3); // double DetJ; // for(unsigned int point=0; point< integration_points.size(); point++) // { // MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ); // double dV= DetJ*integration_points[point].Weight(); // for(unsigned int prim=0 ; prim<(*it)->GetGeometry().size(); prim++) // { // for(unsigned int sec=0 ; sec<(*it)->GetGeometry().size(); sec++) // { // M(((*it)->GetGeometry()[prim].Id()-1), // ((*it)->GetGeometry()[sec].Id()-1))+= // Ncontainer(point, prim)*Ncontainer(point, sec)*dV; // } // } // } // } // for(unsigned int firstvalue=0; firstvalue<6; firstvalue++) // { // noalias(g)= ZeroVector(model_part.NumberOfNodes()); // noalias(b)= ZeroVector(model_part.NumberOfNodes()); // //Transfer of GaussianVariables to Nodal Variablias via L_2-Minimization // // see Jiao + 
Heath "Common-refinement-based data tranfer ..." // // International Journal for numerical methods in engineering 61 (2004) 2402--2427 // // for general description of L_2-Minimization // for( ElementsArrayType::ptr_iterator it = ElementsArray.ptr_begin(); // it != ElementsArray.ptr_end(); // ++it ) // { // const IntegrationPointsArrayType& integration_points // = (*it)->GetGeometry().IntegrationPoints( (*it)->GetIntegrationMethod()); // GeometryType::JacobiansType J(integration_points.size()); // J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod()); // std::vector<Vector> ValuesOnIntPoint(integration_points.size()); // (*it)->GetValueOnIntegrationPoints(rThisVariable, ValuesOnIntPoint, model_part.GetProcessInfo()); // const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod()); // Matrix InvJ(3,3); // double DetJ; // for(unsigned int point=0; point< integration_points.size(); point++) // { // MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ); // double dV= DetJ*integration_points[point].Weight(); // for(unsigned int prim=0 ; prim<(*it)->GetGeometry().size(); prim++) // { // b(((*it)->GetGeometry()[prim].Id()-1)) // +=(ValuesOnIntPoint[point](firstvalue)) // *Ncontainer(point, prim)*dV; // } // } // } // mpLinearSolver->Solve(M, g, b); // for(ModelPart::NodeIterator it = model_part.NodesBegin() ; // it != model_part.NodesEnd() ; it++) // { // it->GetSolutionStepValue(rThisVariable)(firstvalue) // = g((it->Id()-1)); // } // }//END firstvalue // } // omp version void TransferVariablesToNodes(ModelPart& model_part, Variable<Kratos::Vector>& rThisVariable) { ElementsArrayType& ElementsArray= model_part.Elements(); //reset values at node//update by hbui: we should not do this, since some variable is at node, an then transfer again to node for(ModelPart::NodeIterator it = model_part.NodesBegin(); it != model_part.NodesEnd() ; it++) { it->GetSolutionStepValue(rThisVariable) = ZeroVector(6); } //SetUpEquationSystem 
SpaceType::MatrixType M(model_part.NumberOfNodes(), model_part.NumberOfNodes()); SpaceType::VectorType g(model_part.NumberOfNodes()); SpaceType::VectorType b(model_part.NumberOfNodes()); noalias(M)= ZeroMatrix(model_part.NumberOfNodes(), model_part.NumberOfNodes()); int number_of_threads = 1; #ifdef _OPENMP number_of_threads = omp_get_max_threads(); #endif vector<unsigned int> element_partition; CreatePartition(number_of_threads, ElementsArray.size(), element_partition); boost::progress_display show_progress( ElementsArray.size() ); // create the structure for M a priori // Timer::Start("ConstructMatrixStructure"); ConstructMatrixStructure(M, ElementsArray, model_part.GetProcessInfo()); // Timer::Stop("ConstructMatrixStructure"); #ifdef _OPENMP //create the array of lock std::vector< omp_lock_t > lock_array(M.size1()); unsigned int M_size = M.size1(); for(unsigned int i = 0; i < M_size; ++i) omp_init_lock(&lock_array[i]); #endif // Timer::Start("Assemble Transferred stiffness matrix"); #ifdef _OPENMP #pragma omp parallel for #endif for(int k = 0; k < number_of_threads; ++k) { ElementsArrayType::ptr_iterator it_begin = ElementsArray.ptr_begin() + element_partition[k]; ElementsArrayType::ptr_iterator it_end = ElementsArray.ptr_begin() + element_partition[k+1]; for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it ) { const IntegrationPointsArrayType& integration_points = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod()); GeometryType::JacobiansType J(integration_points.size()); J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod()); const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod()); Matrix InvJ(3,3); double DetJ; for(unsigned int point=0; point< integration_points.size(); point++) { MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ); double dV= DetJ*integration_points[point].Weight(); for(unsigned int prim=0 ; prim<(*it)->GetGeometry().size(); prim++) { unsigned int 
row = ((*it)->GetGeometry()[prim].Id()-1); #ifdef _OPENMP omp_set_lock(&lock_array[row]); #endif for(unsigned int sec=0 ; sec<(*it)->GetGeometry().size(); sec++) { unsigned int col = ((*it)->GetGeometry()[sec].Id()-1); M(row, col)+= Ncontainer(point, prim)*Ncontainer(point, sec) * dV; } #ifdef _OPENMP omp_unset_lock(&lock_array[row]); #endif } } ++show_progress; } } // Timer::Stop("Assemble Transferred stiffness matrix"); for(unsigned int firstvalue = 0; firstvalue < 6; ++firstvalue) { noalias(g)= ZeroVector(model_part.NumberOfNodes()); noalias(b)= ZeroVector(model_part.NumberOfNodes()); //Transfer of GaussianVariables to Nodal Variablias via L_2-Minimization // see Jiao + Heath "Common-refinement-based data tranfer ..." // International Journal for numerical methods in engineering 61 (2004) 2402--2427 // for general description of L_2-Minimization // Timer::Start("Assemble Transferred rhs vector"); #ifdef _OPENMP #pragma omp parallel for #endif for(int k = 0; k < number_of_threads; ++k) { ElementsArrayType::ptr_iterator it_begin = ElementsArray.ptr_begin() + element_partition[k]; ElementsArrayType::ptr_iterator it_end = ElementsArray.ptr_begin() + element_partition[k+1]; for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it ) { const IntegrationPointsArrayType& integration_points = (*it)->GetGeometry().IntegrationPoints( (*it)->GetIntegrationMethod()); GeometryType::JacobiansType J(integration_points.size()); J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod()); std::vector<Vector> ValuesOnIntPoint(integration_points.size()); (*it)->GetValueOnIntegrationPoints(rThisVariable, ValuesOnIntPoint, model_part.GetProcessInfo()); const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod()); Matrix InvJ(3,3); double DetJ; for(unsigned int point=0; point< integration_points.size(); point++) { MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ); double dV= DetJ*integration_points[point].Weight(); 
for(unsigned int prim=0 ; prim<(*it)->GetGeometry().size(); prim++) { unsigned int row = ((*it)->GetGeometry()[prim].Id()-1); #ifdef _OPENMP omp_set_lock(&lock_array[row]); #endif b(row) += (ValuesOnIntPoint[point](firstvalue)) * Ncontainer(point, prim) * dV; #ifdef _OPENMP omp_unset_lock(&lock_array[row]); #endif } } } } // Timer::Stop("Assemble Transferred rhs vector"); // Timer::Start("Transfer solve"); mpLinearSolver->Solve(M, g, b); // Timer::Stop("Transfer solve"); // Timer::Start("Transfer result"); for(ModelPart::NodeIterator it = model_part.NodesBegin() ; it != model_part.NodesEnd() ; it++) { it->GetSolutionStepValue(rThisVariable)(firstvalue) = g((it->Id()-1)); } // Timer::Stop("Transfer result"); }//END firstvalue #ifdef _OPENMP for(unsigned int i = 0; i < M_size; ++i) omp_destroy_lock(&lock_array[i]); #endif std::cout << "TransferVariablesToNodes for " << rThisVariable.Name() << " completed" << std::endl; } /** * Transfer of rThisVariable defined on integration points to corresponding * nodal values. The transformation is done in a form that ensures a minimization * of L_2-norm error (/sum{rThisVariable- f(x)) whereas * f(x)= /sum{shape_func_i*rThisVariable_i} * @param model_part model_part on which the transfer should be done * @param rThisVariable Matrix-Variable which should be transferred * @see TransferVariablesToNodes(ModelPart& model_part, Variable<Kratos::Matrix>& rThisVariable) * @see TransferVariablesToNodes(ModelPart& model_part, Variable<Kratos::Vector>& rThisVariable) * @ref Jiao&Heath: "Common-refinement-based data transfer...", Int. * Journal for numer. meth. in eng. 61 (2004) 2402--2427 * WARNING: this may cause segmentation faults as the respective variables * will be created on nodal level while they are originally intended to be * stored on integration points! 
*/ // void TransferVariablesToNodes(ModelPart& model_part, Variable<double>& rThisVariable) // { // ElementsArrayType& ElementsArray= model_part.Elements(); // //loop over all master surfaces (global search) // for(ModelPart::NodeIterator it = model_part.NodesBegin(); // it != model_part.NodesEnd() ; it++) // { // it->GetSolutionStepValue(rThisVariable) // = 0.0; // } // // //SetUpEquationSystem // SpaceType::MatrixType M(model_part.NumberOfNodes(),model_part.NumberOfNodes()); // noalias(M)= ZeroMatrix(model_part.NumberOfNodes(),model_part.NumberOfNodes()); // SpaceType::VectorType g(model_part.NumberOfNodes()); // noalias(g)= ZeroVector(model_part.NumberOfNodes()); // SpaceType::VectorType b(model_part.NumberOfNodes()); // noalias(b)= ZeroVector(model_part.NumberOfNodes()); // //Transfer of GaussianVariables to Nodal Variablias via L_2-Minimization // // see Jiao + Heath "Common-refinement-based data tranfer ..." // // International Journal for numerical methods in engineering 61 (2004) 2402--2427 // // for general description of L_2-Minimization // for( ElementsArrayType::ptr_iterator it = ElementsArray.ptr_begin(); // it != ElementsArray.ptr_end(); // ++it ) // { // const IntegrationPointsArrayType& integration_points // = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod()); // // GeometryType::JacobiansType J(integration_points.size()); // J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod()); // // std::vector<double> ValuesOnIntPoint(integration_points.size()); // // (*it)->GetValueOnIntegrationPoints(rThisVariable, ValuesOnIntPoint, model_part.GetProcessInfo()); // // const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod()); // // Matrix InvJ(3,3); // double DetJ; // // for(unsigned int point=0; point< integration_points.size(); point++) // { // MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ); // // double dV= DetJ*integration_points[point].Weight(); // for(unsigned int prim=0 ; 
// prim<(*it)->GetGeometry().size(); prim++)
// {
//     b(((*it)->GetGeometry()[prim].Id()-1))
//         +=(ValuesOnIntPoint[point])
//         *Ncontainer(point, prim)*dV;
//     for(unsigned int sec=0 ; sec<(*it)->GetGeometry().size(); sec++)
//     {
//         M(((*it)->GetGeometry()[prim].Id()-1),
//           ((*it)->GetGeometry()[sec].Id()-1))+=
//               Ncontainer(point, prim)*Ncontainer(point, sec)*dV;
//     }
// }
// }
// }
// mpLinearSolver->Solve(M, g, b);
// for(ModelPart::NodeIterator it = model_part.NodesBegin() ;
//     it != model_part.NodesEnd() ; it++)
// {
//     it->GetSolutionStepValue(rThisVariable)
//         = g((it->Id()-1));
// }
// }

/**
 * Transfer of a scalar (double) variable defined on integration points to
 * nodal values via a global L_2-minimization ("mass matrix") projection:
 * assembles M_ij = sum_e int N_i N_j dV and b_i = sum_e int N_i q dV,
 * solves M g = b, and writes g back into the nodal solution step data.
 * Assembly is OpenMP-parallel, guarded by one lock per matrix row.
 * NOTE(review): rows/columns are indexed by (node Id - 1), i.e. node Ids
 * are assumed consecutive and one-based -- confirm for the given model_part.
 * @param model_part model_part on which the transfer should be done
 * @param rThisVariable scalar variable to be transferred to the nodes
 */
void TransferVariablesToNodes(ModelPart& model_part, Variable<double>& rThisVariable)
{
    ElementsArrayType& ElementsArray= model_part.Elements();

    //reset all values at node to zero// do not do this, some variable need the values at node, e.g. WATER_PRESSURE
    // for(ModelPart::NodeIterator it = model_part.NodesBegin();
    //     it != model_part.NodesEnd() ; it++)
    // {
    //     it->GetSolutionStepValue(rThisVariable) = 0.0;
    // }

    //SetUpEquationSystem
    SpaceType::MatrixType M(model_part.NumberOfNodes(), model_part.NumberOfNodes());
    SpaceType::VectorType g(model_part.NumberOfNodes());
    SpaceType::VectorType b(model_part.NumberOfNodes());
    noalias(M)= ZeroMatrix(model_part.NumberOfNodes(), model_part.NumberOfNodes());

    int number_of_threads = 1;
#ifdef _OPENMP
    number_of_threads = omp_get_max_threads();
#endif
    // split the element array into one contiguous range per thread
    vector<unsigned int> element_partition;
    CreatePartition(number_of_threads, ElementsArray.size(), element_partition);

    boost::progress_display show_progress( ElementsArray.size() );

    // create the structure for M a priori
    // Timer::Start("ConstructMatrixStructure");
    ConstructMatrixStructure(M, ElementsArray, model_part.GetProcessInfo());
    // Timer::Stop("ConstructMatrixStructure");

#ifdef _OPENMP
    //create the array of lock (one lock per matrix row)
    std::vector< omp_lock_t > lock_array(M.size1());
    unsigned int M_size = M.size1();
    for(unsigned int i = 0; i < M_size; ++i)
        omp_init_lock(&lock_array[i]);
#endif

    // Timer::Start("Assemble Transferred stiffness matrix");
#ifdef _OPENMP
    #pragma omp parallel for
#endif
    for(int k = 0; k < number_of_threads; ++k)
    {
        ElementsArrayType::ptr_iterator it_begin = ElementsArray.ptr_begin() + element_partition[k];
        ElementsArrayType::ptr_iterator it_end = ElementsArray.ptr_begin() + element_partition[k+1];
        for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
        {
            const IntegrationPointsArrayType& integration_points
                = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());

            GeometryType::JacobiansType J(integration_points.size());
            J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());

            const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());

            Matrix InvJ(3,3);
            double DetJ;

            for(unsigned int point=0; point< integration_points.size(); point++)
            {
                // only DetJ is used below; InvJ is a by-product of the inversion
                MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);

                double dV= DetJ*integration_points[point].Weight();
                for(unsigned int prim=0 ; prim<(*it)->GetGeometry().size(); prim++)
                {
                    unsigned int row = ((*it)->GetGeometry()[prim].Id()-1);
#ifdef _OPENMP
                    omp_set_lock(&lock_array[row]);
#endif
                    for(unsigned int sec=0 ; sec<(*it)->GetGeometry().size(); sec++)
                    {
                        unsigned int col = ((*it)->GetGeometry()[sec].Id()-1);
                        M(row, col)+= Ncontainer(point, prim)*Ncontainer(point, sec) * dV;
                    }
#ifdef _OPENMP
                    omp_unset_lock(&lock_array[row]);
#endif
                }
            }
            ++show_progress;
        }
    }
    // Timer::Stop("Assemble Transferred stiffness matrix");

    noalias(g)= ZeroVector(model_part.NumberOfNodes());
    noalias(b)= ZeroVector(model_part.NumberOfNodes());
    //Transfer of GaussianVariables to Nodal Variablias via L_2-Minimization
    // see Jiao + Heath "Common-refinement-based data tranfer ..."
    // International Journal for numerical methods in engineering 61 (2004) 2402--2427
    // for general description of L_2-Minimization
    // Timer::Start("Assemble Transferred rhs vector");
#ifdef _OPENMP
    #pragma omp parallel for
#endif
    for(int k = 0; k < number_of_threads; ++k)
    {
        ElementsArrayType::ptr_iterator it_begin = ElementsArray.ptr_begin() + element_partition[k];
        ElementsArrayType::ptr_iterator it_end = ElementsArray.ptr_begin() + element_partition[k+1];
        for( ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it )
        {
            const IntegrationPointsArrayType& integration_points
                = (*it)->GetGeometry().IntegrationPoints( (*it)->GetIntegrationMethod());

            GeometryType::JacobiansType J(integration_points.size());
            J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());

            // scalar value of rThisVariable at each integration point
            std::vector<double> ValuesOnIntPoint(integration_points.size());

            (*it)->GetValueOnIntegrationPoints(rThisVariable, ValuesOnIntPoint, model_part.GetProcessInfo());

            // std::cout << "ValuesOnIntPoint at element " << (*it)->Id() << ":";
            // for(std::size_t i = 0; i < integration_points.size(); ++i)
            //     std::cout << " " << ValuesOnIntPoint[i];
            // std::cout << std::endl;

            const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());

            Matrix InvJ(3,3);
            double DetJ;
            for(unsigned int point=0; point< integration_points.size(); point++)
            {
                MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);

                double dV= DetJ*integration_points[point].Weight();

                for(unsigned int prim=0 ; prim<(*it)->GetGeometry().size(); prim++)
                {
                    unsigned int row = ((*it)->GetGeometry()[prim].Id()-1);
#ifdef _OPENMP
                    omp_set_lock(&lock_array[row]);
#endif
                    b(row) += ValuesOnIntPoint[point] * Ncontainer(point, prim) * dV;
#ifdef _OPENMP
                    omp_unset_lock(&lock_array[row]);
#endif
                }
            }
        }
    }
    // Timer::Stop("Assemble Transferred rhs vector");

    // Timer::Start("Transfer solve");
    mpLinearSolver->Solve(M, g, b);
    // Timer::Stop("Transfer solve");

    // Timer::Start("Transfer result");
    for(ModelPart::NodeIterator it = model_part.NodesBegin() ; it != model_part.NodesEnd() ; it++)
    {
        it->GetSolutionStepValue(rThisVariable) = g((it->Id()-1));
    }
    // Timer::Stop("Transfer result");

#ifdef _OPENMP
    for(unsigned int i = 0; i < M_size; ++i)
        omp_destroy_lock(&lock_array[i]);
#endif

    std::cout << "TransferVariablesToNodes for " << rThisVariable.Name() << " completed" << std::endl;
}

/**
 * Transfer of rThisVariable stored on nodes from source mesh to target mesh.
 * The transformation is done in a way that ensures a minimization
 * of L_2-norm error (/sum{f_old(x)- f_new(x)) whereas
 * f(x)_old/new= /sum{shape_func_i*rThisVariable_i}
 * @param rSource source model_part
 * @param rTarget target model_part
 * @param rThisVariable Matrix-Variable which should be transferred
 * @see TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTarget, Variable<Kratos::Vector>& rThisVariable)
 * @see TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTarget, Variable<double>& rThisVariable)
 * @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
 * Journal for numer. meth. in eng.
 * 61 (2004) 2402--2427
 */
void TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTarget, Variable<Kratos::Matrix>& rThisVariable)
{
    ElementsArrayType& SourceMeshElementsArray= rSource.Elements();
    ElementsArrayType& TargetMeshElementsArray= rTarget.Elements();

    //loop over all master surfaces (global search)
    // initialise the target nodal values (3x3 matrices) to zero
    for(ModelPart::NodeIterator it = rTarget.NodesBegin(); it != rTarget.NodesEnd() ; it++)
    {
        it->GetSolutionStepValue(rThisVariable) = ZeroMatrix(3,3);
    }

    //SetUpEquationSystem: mass matrix M, solution vector g, rhs vector b
    SpaceType::MatrixType M(rTarget.NumberOfNodes(),rTarget.NumberOfNodes());
    noalias(M)= ZeroMatrix(rTarget.NumberOfNodes(),rTarget.NumberOfNodes());
    SpaceType::VectorType g(rTarget.NumberOfNodes());
    SpaceType::VectorType b(rTarget.NumberOfNodes());

    // assemble the target-mesh mass matrix once; reused for all 9 components
    for( ElementsArrayType::ptr_iterator it = TargetMeshElementsArray.ptr_begin();
            it != TargetMeshElementsArray.ptr_end(); ++it )
    {
        const IntegrationPointsArrayType& integration_points
            = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());

        GeometryType::JacobiansType J(integration_points.size());
        J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());

        const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());

        Matrix InvJ(3,3);
        double DetJ;
        for(unsigned int point=0; point< integration_points.size(); point++)
        {
            MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);
            double dV= DetJ*integration_points[point].Weight();
            for(unsigned int prim=0; prim<(*it)->GetGeometry().size() ; prim++)
            {
                for(unsigned int sec=0; sec<(*it)->GetGeometry().size() ; sec++)
                {
                    M(((*it)->GetGeometry()[prim].Id()-1),
                      ((*it)->GetGeometry()[sec].Id()-1))+=
                          Ncontainer(point, prim)*Ncontainer(point, sec)*dV;
                }
            }
        }
    }

    // project each of the 3x3 matrix components independently
    for(unsigned int firstvalue= 0; firstvalue< 3; firstvalue++)
    {
        for(unsigned int secondvalue= 0; secondvalue< 3; secondvalue++)
        {
            noalias(b)= ZeroVector(rTarget.NumberOfNodes());
            noalias(g)= ZeroVector(rTarget.NumberOfNodes());
            //Transfer of GaussianVariables to Nodal Variablias via L_2-Minimization
            // see Jiao + Heath "Common-refinement-based data tranfer ..."
            // International Journal for numerical methods in engineering 61 (2004) 2402--2427
            // for general description of L_2-Minimization
            for( ElementsArrayType::ptr_iterator it = TargetMeshElementsArray.ptr_begin();
                    it != TargetMeshElementsArray.ptr_end(); ++it )
            {
                const IntegrationPointsArrayType& integration_points
                    = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());

                GeometryType::JacobiansType J(integration_points.size());
                J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());

                const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());

                Matrix InvJ(3,3);
                double DetJ;
                for(unsigned int point=0; point< integration_points.size(); point++)
                {
                    MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);

                    Point sourceLocalPoint;
                    Point targetLocalPoint;
                    noalias(targetLocalPoint)= integration_points[point];
                    Point targetGlobalPoint;
                    (*it)->GetGeometry().GlobalCoordinates(targetGlobalPoint, targetLocalPoint);

                    Element::Pointer sourceElement;
                    double functionValue;
                    //Calculate Value of rVariable(firstvalue, secondvalue) in OldMesh
                    if(FindPartnerElement(targetGlobalPoint, SourceMeshElementsArray, sourceElement,sourceLocalPoint))
                    {
                        functionValue= ValueMatrixInOldMesh( *sourceElement,sourceLocalPoint,rThisVariable, firstvalue, secondvalue );
                    }
                    else
                    {
                        // no partner element: this integration point contributes nothing
                        std::cout<<"###### NO PARTNER FOUND IN OLD MESH : TransferVariablesBetweenMeshes(...Matrix...)#####"<<std::endl;
                        continue;
                    }

                    double dV= DetJ*integration_points[point].Weight();

                    for(unsigned int prim=0; prim<(*it)->GetGeometry().size(); prim++)
                    {
                        b(((*it)->GetGeometry()[prim].Id()-1))
                            +=functionValue
                              *Ncontainer(point, prim)*dV;
                    }
                }
            }

            mpLinearSolver->Solve(M, g, b);

            for(ModelPart::NodeIterator it = rTarget.NodesBegin() ; it != rTarget.NodesEnd() ; it++)
            {
                it->GetSolutionStepValue(rThisVariable)(firstvalue,secondvalue) = g((it->Id()-1));
            }
        }//END secondvalue
    }//END firstvalue
}

/**
 * Transfer of
 * rThisVariable stored on nodes from source mesh to target mesh.
 * The transformation is done in a way that ensures a minimization
 * of L_2-norm error (/sum{f_old(x)- f_new(x)) whereas
 * f(x)_old/new= /sum{shape_func_i*rThisVariable_i}
 * @param rSource source model_part
 * @param rTarget target model_part
 * @param rThisVariable Vector-Variable which should be transferred
 * @see TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTarget, Variable<Kratos::Matrix>& rThisVariable)
 * @see TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTargetw, Variable<double>& rThisVariable)
 * @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
 * Journal for numer. meth. in eng. 61 (2004) 2402--2427
 */
void TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTarget, Variable<Kratos::Vector>& rThisVariable)
{
    ElementsArrayType& SourceMeshElementsArray= rSource.Elements();
    ElementsArrayType& TargetMeshElementsArray= rTarget.Elements();

    //loop over all master surfaces (global search)
    // initialise target nodal values (6-component vectors) to zero
    for(ModelPart::NodeIterator it = rTarget.NodesBegin(); it != rTarget.NodesEnd() ; it++)
    {
        it->GetSolutionStepValue(rThisVariable) = ZeroVector(6);
    }

    //SetUpEquationSystem
    SpaceType::MatrixType M(rTarget.NumberOfNodes(),rTarget.NumberOfNodes());
    noalias(M)= ZeroMatrix(rTarget.NumberOfNodes(),rTarget.NumberOfNodes());
    SpaceType::VectorType g(rTarget.NumberOfNodes());
    SpaceType::VectorType b(rTarget.NumberOfNodes());

    // assemble the target-mesh mass matrix once; reused for all 6 components
    for( ElementsArrayType::ptr_iterator it = TargetMeshElementsArray.ptr_begin();
            it != TargetMeshElementsArray.ptr_end(); ++it )
    {
        const IntegrationPointsArrayType& integration_points
            = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());

        GeometryType::JacobiansType J(integration_points.size());
        J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());

        const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());

        Matrix InvJ(3,3);
        double DetJ;
        for(unsigned int point=0; point< integration_points.size(); point++)
        {
            MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);
            double dV= DetJ*integration_points[point].Weight();
            for(unsigned int prim=0 ; prim<(*it)->GetGeometry().size() ; prim++)
            {
                for(unsigned int sec=0 ; sec<(*it)->GetGeometry().size() ; sec++)
                {
                    M(((*it)->GetGeometry()[prim].Id()-1),
                      ((*it)->GetGeometry()[sec].Id()-1))+=
                          Ncontainer(point, prim)*Ncontainer(point, sec)*dV;
                }
            }
        }
    }

    // project each of the 6 vector components independently
    for(unsigned int firstvalue= 0; firstvalue< 6; firstvalue++)
    {
        noalias(b)= ZeroVector(rTarget.NumberOfNodes());
        noalias(g)= ZeroVector(rTarget.NumberOfNodes());
        //Transfer of GaussianVariables to Nodal Variablias via L_2-Minimization
        // see Jiao + Heath "Common-refinement-based data tranfer ..."
        // International Journal for numerical methods in engineering 61 (2004) 2402--2427
        // for general description of L_2-Minimization
        for( ElementsArrayType::ptr_iterator it = TargetMeshElementsArray.ptr_begin();
                it != TargetMeshElementsArray.ptr_end(); ++it )
        {
            const IntegrationPointsArrayType& integration_points
                = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());

            GeometryType::JacobiansType J(integration_points.size());
            J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());

            const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());

            Matrix InvJ(3,3);
            double DetJ;
            for(unsigned int point=0; point< integration_points.size(); point++)
            {
                MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);

                Point sourceLocalPoint;
                Point targetLocalPoint;
                noalias(targetLocalPoint)= integration_points[point];
                Point targetGlobalPoint;
                (*it)->GetGeometry().GlobalCoordinates(targetGlobalPoint, targetLocalPoint);

                Element::Pointer sourceElement;
                double functionValue;
                //Calculate Value of rVariable(firstvalue) in OldMesh
                if(FindPartnerElement(targetGlobalPoint, SourceMeshElementsArray, sourceElement,sourceLocalPoint))
                {
                    functionValue= ValueVectorInOldMesh( *sourceElement,sourceLocalPoint,rThisVariable, firstvalue);
                }
                else
                {
                    // no partner element: this integration point contributes nothing
                    std::cout<<"###### NO PARTNER FOUND IN OLD MESH : TransferVariablesBetweenMeshes(...Vector...)#####"<<std::endl;
                    continue;
                }

                double dV= DetJ*integration_points[point].Weight();

                for(unsigned int prim=0 ; prim<(*it)->GetGeometry().size() ; prim++)
                {
                    b(((*it)->GetGeometry()[prim].Id()-1))
                        +=functionValue
                          *Ncontainer(point, prim)*dV;
                }
            }
        }

        mpLinearSolver->Solve(M, g, b);

        for(ModelPart::NodeIterator it = rTarget.NodesBegin() ; it != rTarget.NodesEnd() ; it++)
        {
            it->GetSolutionStepValue(rThisVariable)(firstvalue) = g((it->Id()-1));
        }
    }//END firstvalue
}

/**
 * Transfer of rThisVariable stored on nodes from source mesh to target mesh.
 * The transformation is done in a way that ensures a minimization
 * of L_2-norm error (/sum{f_old(x)- f_new(x)) whereas
 * f(x)_old/new= /sum{shape_func_i*rThisVariable_i}
 * @param rSource source model_part
 * @param rTarget target model_part
 * @param rThisVariable double-Variable which should be transferred
 * @see TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTarget, Variable<Kratos::Matrix>& rThisVariable)
 * @see TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTarget, Variable<Kratos::Vector>& rThisVariable)
 * @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
 * Journal for numer. meth. in eng.
 * 61 (2004) 2402--2427
 */
void TransferVariablesBetweenMeshes(ModelPart& rSource, ModelPart& rTarget, Variable<double>& rThisVariable)
{
    ElementsArrayType& SourceMeshElementsArray= rSource.Elements();
    ElementsArrayType& TargetMeshElementsArray= rTarget.Elements();

    //loop over all master surfaces (global search)
    for(ModelPart::NodeIterator it = rTarget.NodesBegin(); it != rTarget.NodesEnd() ; it++)
    {
        it->GetSolutionStepValue(rThisVariable) = 0.0;
    }

    //SetUpEquationSystem
    SpaceType::MatrixType M(rTarget.NumberOfNodes(),rTarget.NumberOfNodes());
    noalias(M)= ZeroMatrix(rTarget.NumberOfNodes(),rTarget.NumberOfNodes());
    SpaceType::VectorType g(rTarget.NumberOfNodes());
    noalias(g)= ZeroVector(rTarget.NumberOfNodes());
    SpaceType::VectorType b(rTarget.NumberOfNodes());
    noalias(b)= ZeroVector(rTarget.NumberOfNodes());

    //Transfer of GaussianVariables to Nodal Variablias via L_2-Minimization
    // see Jiao + Heath "Common-refinement-based data tranfer ..."
    // International Journal for numerical methods in engineering 61 (2004) 2402--2427
    // for general description of L_2-Minimization
    // NOTE(review): unlike the Matrix/Vector overloads, M and b are assembled
    // together in a single pass here (only one scalar component is projected).
    for( ElementsArrayType::ptr_iterator it = TargetMeshElementsArray.ptr_begin();
            it != TargetMeshElementsArray.ptr_end(); ++it )
    {
        const IntegrationPointsArrayType& integration_points
            = (*it)->GetGeometry().IntegrationPoints((*it)->GetIntegrationMethod());

        GeometryType::JacobiansType J(integration_points.size());
        J = (*it)->GetGeometry().Jacobian(J, (*it)->GetIntegrationMethod());

        const Matrix& Ncontainer = (*it)->GetGeometry().ShapeFunctionsValues((*it)->GetIntegrationMethod());

        Matrix InvJ(3,3);
        double DetJ;
        for(unsigned int point=0; point< integration_points.size(); point++)
        {
            MathUtils<double>::InvertMatrix(J[point],InvJ,DetJ);

            Point sourceLocalPoint;
            Point targetLocalPoint;
            noalias(targetLocalPoint)= integration_points[point];
            Point targetGlobalPoint;
            (*it)->GetGeometry().GlobalCoordinates(targetGlobalPoint, targetLocalPoint);

            Element::Pointer sourceElement;
            double functionValue;
            //Calculate Value of rVariable in OldMesh
            if(FindPartnerElement(targetGlobalPoint, SourceMeshElementsArray, sourceElement,sourceLocalPoint))
            {
                functionValue= MappedValue( *sourceElement,sourceLocalPoint,rThisVariable);
            }
            else
            {
                std::cout<<"###### NO PARTNER FOUND IN OLD MESH : TransferVariablesBetweenMeshes(...double...)#####"<<std::endl;
                continue;
            }

            double dV= DetJ*integration_points[point].Weight();

            for(unsigned int prim=0 ; prim<(*it)->GetGeometry().size() ; prim++)
            {
                b(((*it)->GetGeometry()[prim].Id()-1))
                    +=functionValue*Ncontainer(point, prim)*dV;
                for(unsigned int sec=0; sec<(*it)->GetGeometry().size(); sec++)
                {
                    M(((*it)->GetGeometry()[prim].Id()-1),
                      ((*it)->GetGeometry()[sec].Id()-1))+=
                          Ncontainer(point, prim)*Ncontainer(point, sec)*dV;
                }
            }
        }
    }

    mpLinearSolver->Solve(M, g, b);

    for(ModelPart::NodeIterator it = rTarget.NodesBegin() ; it != rTarget.NodesEnd() ; it++)
    {
        it->GetSolutionStepValue(rThisVariable) = g((it->Id()-1));
    }
}

/**
 * Auxiliary function.
 * This one calculates the target value of given Variable by shape-function-based
 * interpolation of the nodal values from given source element to the given
 * target point that is assumed to lie within the source element
 * @return value of given variable in new point
 * @param oldElement corresponding element in source mesh
 * @param localPoint given target point to map the variable to
 * @param rThisVariable given variable to be transferred
 * @see ValueVectorInOldMesh(Element& oldElement,Point& localPoint, const Variable<Kratos::Vector>& rThisVariable )
 * @see MappedValue( Element& sourceElement,Point& targetPoint, const Variable<double>& rThisVariable)
 */
Matrix ValueMatrixInOldMesh(Element& oldElement,Point& localPoint, const Variable<Kratos::Matrix>& rThisVariable )
{
    Matrix newValue(3,3);
    noalias(newValue) = ZeroMatrix(3,3);
    Matrix temp(3,3);

    Vector shape_functions_values;
    shape_functions_values = oldElement.GetGeometry().ShapeFunctionsValues(shape_functions_values, localPoint);

    for(unsigned int i=0; i< oldElement.GetGeometry().size(); i++)
    {
        // nodal 3x3 value weighted by the node's shape function at localPoint
        noalias(temp) = oldElement.GetGeometry()[i].GetSolutionStepValue(rThisVariable);
        for(unsigned int k=0; k<3; k++)
            for(unsigned int l=0; l<3; l++)
                newValue(k,l) += shape_functions_values[i] * temp(k,l);
    }
    return newValue;
}

/**
 * Auxiliary function.
 * This one calculates the target value of given Variable by shape-function-based
 * interpolation of the nodal values from given source element to the given
 * target point that is assumed to lie within the source element
 * @return value of given variable in new point
 * @param oldElement corresponding element in source mesh
 * @param localPoint given target point to map the variable to
 * @param rThisVariable given variable to be transferred
 * @see ValueMatrixInOldMesh(Element& oldElement,Point& localPoint, const Variable<Kratos::Matrix>& rThisVariable )
 * @see MappedValue( Element& sourceElement,Point& targetPoint, const Variable<double>& rThisVariable)
 */
Vector ValueVectorInOldMesh(Element& oldElement,Point& localPoint, const Variable<Kratos::Vector>& rThisVariable )
{
    Vector newValue(6);
    noalias(newValue) = ZeroVector(6);
    Vector temp(6);

    Vector shape_functions_values;
    shape_functions_values = oldElement.GetGeometry().ShapeFunctionsValues(shape_functions_values, localPoint);

    for(unsigned int i=0; i<oldElement.GetGeometry().size(); i++)
    {
        noalias(temp)= oldElement.GetGeometry()[i].GetSolutionStepValue(rThisVariable);
        for(unsigned int k=0; k<6; k++)
            newValue(k) += shape_functions_values[i] * temp(k);
    }
    return newValue;
}

/**
 * Auxiliary function.
* This one calculates the target value of given Variable by shape-function-based * interpolation of the nodal values from given source element to the given * target point that is assumed to lie within the source element * @return value of given variable in new point * @param sourceElement corresponding element in source mesh * @param targetPoint given target point to map the variable to * @param rThisVariable given variable to be transferred * @see ValueMatrixInOldMesh(Element& oldElement,Point& localPoint, const Variable<Kratos::Matrix>& rThisVariable ) * @see ValueVectorInOldMesh(Element& oldElement,Point& localPoint, const Variable<Kratos::Vector>& rThisVariable ) */ double MappedValuePressure( Element& sourceElement,Point& targetPoint, const Variable<double>& rThisVariable) { double newValue = 0.0; Geometry<Node<3> >::Pointer pPressureGeometry; if(sourceElement.GetGeometry().size()==20 || sourceElement.GetGeometry().size()==27) pPressureGeometry= Geometry<Node<3> >::Pointer(new Hexahedra3D8 <Node<3> >( sourceElement.GetGeometry()(0),sourceElement.GetGeometry()(1), sourceElement.GetGeometry()(2),sourceElement.GetGeometry()(3), sourceElement.GetGeometry()(4),sourceElement.GetGeometry()(5), sourceElement.GetGeometry()(6),sourceElement.GetGeometry()(7))); if(sourceElement.GetGeometry().size()==10 ) pPressureGeometry= Geometry<Node<3> >::Pointer(new Tetrahedra3D4 <Node<3> >( sourceElement.GetGeometry()(0),sourceElement.GetGeometry()(1), sourceElement.GetGeometry()(2),sourceElement.GetGeometry()(3))); Vector shape_functions_values; shape_functions_values = pPressureGeometry->ShapeFunctionsValues(shape_functions_values, targetPoint); for(unsigned int i= 0; i< pPressureGeometry->size(); i++) { newValue += shape_functions_values[i] * sourceElement.GetGeometry()[i].GetSolutionStepValue(rThisVariable); } return newValue; } /** * Auxiliary function. 
* This one calculates the target value of given Variable by shape-function-based * interpolation of the nodal values from given source element to the given * target point that is assumed to lie within the source element * @return value of given variable in new point * @param sourceElement corresponding element in source mesh * @param targetPoint given target point to map the variable to * @param rThisVariable given variable to be transferred * @see ValueMatrixInOldMesh(Element& oldElement,Point& localPoint, const Variable<Kratos::Matrix>& rThisVariable ) * @see ValueVectorInOldMesh(Element& oldElement,Point& localPoint, const Variable<Kratos::Vector>& rThisVariable ) */ double MappedValue( Element& sourceElement,Point& targetPoint, const Variable<double>& rThisVariable) { double newValue = 0.0; Vector shape_functions_values; shape_functions_values = sourceElement.GetGeometry().ShapeFunctionsValues(shape_functions_values, targetPoint); for(unsigned int i= 0; i< sourceElement.GetGeometry().size(); i++) { newValue += shape_functions_values[i] * sourceElement.GetGeometry()[i].GetSolutionStepValue(rThisVariable); } return newValue; } /** * Auxiliary function. 
* This one calculates the target value of given Variable by shape-function-based * interpolation of the nodal values from given source element to the given * target point that is assumed to lie within the source element * @return value of given variable in new point * @param sourceElement corresponding element in source mesh * @param targetPoint given target point to map the variable to * @param rThisVariable given variable to be transferred */ Vector MappedValue( Element& sourceElement,Point& targetPoint, const Variable<array_1d<double, 3 > >& rThisVariable) { Vector newValue = ZeroVector(3); Vector shape_functions_values; shape_functions_values = sourceElement.GetGeometry().ShapeFunctionsValues(shape_functions_values, targetPoint); for(unsigned int i=0; i<sourceElement.GetGeometry().size(); i++) { newValue += shape_functions_values[i] * sourceElement.GetGeometry()[i].GetSolutionStepValue(rThisVariable); } return newValue; } /** * calculates for a point given with the physical coords newNode * the element oldElement where it lays in and the natural coords * localPoint within this element * @return whether a corresponding element and natural coords could be found * @param newNode physical coordinates of given point * @param OldMeshElementsArray Array of elements wherein the search should be performed * @param oldElement corresponding element for newNode * @param rResult corresponding natural coords for newNode * TODO: find a faster method for outside search (hextree? 
 * etc.), maybe outside this
 * function by restriction of OldMeshElementsArray
 */
bool FindPartnerElement( CoordinatesArrayType& newNode, ElementsArrayType& OldMeshElementsArray, Element::Pointer& oldElement,Point& rResult)
{
    bool partner_found= false;
    //noalias(rResult)= ZeroVector(3);
    ElementsArrayType::Pointer OldElementsSet( new ElementsArrayType() );
    // distances of the candidate sets already examined in previous passes
    std::vector<double > OldMinDist;
    bool newMinDistFound= false;
    int counter = 0;
    do
    {
        double minDist = 1.0e120;
        newMinDistFound= false;
        OldElementsSet->clear();
        //loop over all master surfaces (global search)
        // this is brute force search and should be optimized
        for( ElementsArrayType::ptr_iterator it = OldMeshElementsArray.ptr_begin();
                it != OldMeshElementsArray.ptr_end(); ++it )
        {
            //loop over all nodes in tested element
            for( unsigned int n=0; n<(*it)->GetGeometry().size(); n++ )
            {
                // squared distance from the query point to the element node,
                // measured in the initial (X0/Y0/Z0) configuration
                double dist = ((*it)->GetGeometry().GetPoint(n).X0()-newNode[0])
                              *((*it)->GetGeometry().GetPoint(n).X0()-newNode[0])
                              +((*it)->GetGeometry().GetPoint(n).Y0()-newNode[1])
                              *((*it)->GetGeometry().GetPoint(n).Y0()-newNode[1])
                              +((*it)->GetGeometry().GetPoint(n).Z0()-newNode[2])
                              *((*it)->GetGeometry().GetPoint(n).Z0()-newNode[2]);
                if( fabs(dist-minDist) < 1e-7 )
                {
                    // practically the same distance: keep as an additional candidate
                    OldElementsSet->push_back(*it);
                }
                else if( dist < minDist )
                {
                    // ignore distances already tried in an earlier pass
                    bool alreadyUsed= false;
                    for(unsigned int old_dist= 0; old_dist<OldMinDist.size(); old_dist++)
                    {
                        if(fabs(dist- OldMinDist[old_dist])< 1e-7 )
                            alreadyUsed= true;
                    }
                    if(!alreadyUsed)
                    {
                        OldElementsSet->clear();
                        minDist = dist;
                        OldElementsSet->push_back(*it);
                        newMinDistFound= true;
                    }
                }
            }
        }
        OldMinDist.push_back(minDist);
        // KRATOS_WATCH(OldElementsSet->size());

        // test all candidates at the current minimum distance for containment
        for( ElementsArrayType::ptr_iterator it = OldElementsSet->ptr_begin();
                it != OldElementsSet->ptr_end(); ++it )
        {
            // std::cout << "checking elements list" << std::endl;
            if( (*it)->GetGeometry().IsInside( newNode, rResult ) )
            {
                // std::cout << "isInside" << std::endl;
                // oldElement = *(*it);
                oldElement = (*it);
                partner_found = true;
                return partner_found;
            }
        }
        // std::cout << counter << std::endl;
        counter++;
        // give up after a fixed number of widening passes
        if( counter > 27 )
            break;
    }
    while(newMinDistFound);

    if(!partner_found && GetEchoLevel() > 0)
        std::cout<<" !!!! NO PARTNER FOUND !!!! "<<std::endl;
    return partner_found;
}

//***************************************************************************
//***************************************************************************

/**
 * Auxiliary function.
 * This one calculates the target value of given Matrix-Variable at row firtsvalue
 * and column secondvalue by shape-function-based
 * interpolation of the nodal values from given source element to the given
 * target point that is assumed to lie within the source element
 * @return value of given variable in new point
 * @param oldElement corresponding element in source mesh
 * @param localPoint given target point to map the variable to
 * @param rThisVariable given variable to be transferred
 * @param firstvalue row index
 * @param secondvalue column index
 * @see ValueVectorInOldMesh(Element& oldElement,Point& localPoint, const Variable<Kratos::Vector>& rThisVariable, unsigned int firstvalue)
 */
double ValueMatrixInOldMesh(Element& oldElement,Point& localPoint, const Variable<Kratos::Matrix>& rThisVariable, unsigned int firstvalue, unsigned int secondvalue )
{
    double newValue = 0.0;
    Vector shape_functions_values;
    shape_functions_values = oldElement.GetGeometry().ShapeFunctionsValues(shape_functions_values, localPoint);
    for(unsigned int i = 0; i < oldElement.GetGeometry().size(); ++i)
    {
        // only component (firstvalue, secondvalue) of each nodal matrix is used
        newValue += shape_functions_values[i] * oldElement.GetGeometry()[i].GetSolutionStepValue(rThisVariable)(firstvalue,secondvalue);
    }
    return newValue;
}

/**
 * Auxiliary function.
 * This one calculates the target value of given Vector-Variable at firtsvalue
 * by shape-function-based
 * interpolation of the nodal values from given source element to the given
 * target point that is assumed to lie within the source element
 * @return value of given variable in new point
 * @param oldElement corresponding element in source mesh
 * @param localPoint given target point to map the variable to
 * @param rThisVariable given variable to be transferred
 * @param firstvalue index
 * @see ValueVectorInOldMesh(Element& oldElement,Point& localPoint, const Variable<Kratos::Vector>& rThisVariable, unsigned int firstvalue)
 */
double ValueVectorInOldMesh(Element& oldElement,Point& localPoint, const Variable<Kratos::Vector>& rThisVariable, unsigned int firstvalue )
{
    double newValue = 0.0;
    Vector shape_functions_values;
    shape_functions_values = oldElement.GetGeometry().ShapeFunctionsValues(shape_functions_values, localPoint);
    for(unsigned int i = 0; i < oldElement.GetGeometry().size(); ++i)
    {
        // only component firstvalue of each nodal vector is used
        newValue += shape_functions_values[i] * oldElement.GetGeometry()[i].GetSolutionStepValue(rThisVariable)(firstvalue);
    }
    return newValue;
}

protected:

// linear solver used for all L_2-projection solves in this utility
LinearSolverType::Pointer mpLinearSolver;

//**********AUXILIARY FUNCTION**************************************************************
//******************************************************************************************
/**
 * Builds the sparsity structure of A from the element connectivities:
 * collects, per row (node Id - 1), the set of coupled column indices of all
 * active elements, then reserves and inserts explicit zeros so subsequent
 * assembly can write A(i,j) without structural insertions.
 * NOTE(review): CurrentProcessInfo is currently unused here.
 * @param A matrix whose structure is created (entries set to zero)
 * @param rElements elements whose connectivities define the coupling
 * @param CurrentProcessInfo current process info (unused)
 */
void ConstructMatrixStructure (
    SpaceType::MatrixType& A,
    ElementsArrayType& rElements,
    ProcessInfo& CurrentProcessInfo
)
{
    std::size_t equation_size = A.size1();
    std::vector<std::vector<std::size_t> > indices(equation_size);

    Element::EquationIdVectorType ids;
    for(ElementsArrayType::iterator i_element = rElements.begin() ; i_element != rElements.end() ; ++i_element)
    {
        // skip elements explicitly deactivated via the ACTIVE flag
        bool element_is_active = true;
        if( (i_element)->IsDefined(ACTIVE) )
            element_is_active = (i_element)->Is(ACTIVE);
        if( element_is_active )
        {
            // equation ids are taken directly as (node Id - 1)
            ids.resize((i_element)->GetGeometry().size());
            for(unsigned int i = 0; i < (i_element)->GetGeometry().size(); ++i)
            {
                ids[i] = (i_element)->GetGeometry()[i].Id() - 1;
            }
            for(std::size_t i = 0 ; i < ids.size() ; ++i)
            {
                if(ids[i] < equation_size)
                {
                    std::vector<std::size_t>& row_indices = indices[ids[i]];
                    for(std::size_t j = 0 ; j < ids.size() ; j++)
                    {
                        if(ids[j] < equation_size)
                        {
                            AddUnique(row_indices,ids[j]);
                        }
                    }
                }
            }
        }
    }

    //allocating the memory needed
    int data_size = 0;
    for(std::size_t i = 0 ; i < indices.size() ; i++)
    {
        data_size += indices[i].size();
    }
    A.reserve(data_size, false);

    //filling with zero the matrix (creating the structure)
#ifndef _OPENMP
    for(std::size_t i = 0 ; i < indices.size() ; i++)
    {
        std::vector<std::size_t>& row_indices = indices[i];
        std::sort(row_indices.begin(), row_indices.end());
        for(std::vector<std::size_t>::iterator it= row_indices.begin(); it != row_indices.end() ; it++)
        {
            // entries are pushed in increasing (row, column) order
            A.push_back(i,*it,0.00);
        }
        row_indices.clear();
    }
#else
    int number_of_threads = omp_get_max_threads();
    vector<unsigned int> matrix_partition;
    CreatePartition(number_of_threads, indices.size(), matrix_partition);
    // NOTE(review): this opens one parallel region per k and lets only thread
    // k do work, i.e. the row blocks are effectively processed one after
    // another -- presumably to keep the push_back insertion order strictly
    // increasing. Confirm this is intentional before restructuring.
    for( int k=0; k<number_of_threads; k++ )
    {
        #pragma omp parallel
        if( omp_get_thread_num() == k )
        {
            for( std::size_t i = matrix_partition[k]; i < matrix_partition[k+1]; i++ )
            {
                std::vector<std::size_t>& row_indices = indices[i];
                std::sort(row_indices.begin(), row_indices.end());
                for(std::vector<std::size_t>::iterator it= row_indices.begin(); it != row_indices.end() ; it++)
                {
                    A.push_back(i, *it, 0.00);
                }
                row_indices.clear();
            }
        }
    }
#endif
}

//**********AUXILIARY FUNCTION**************************************************************
//******************************************************************************************
/**
 * Appends candidate to v unless it is already present (linear scan).
 * @param v row-index list to extend
 * @param candidate column index to insert uniquely
 */
inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate)
{
    std::vector<std::size_t>::iterator i = v.begin();
    std::vector<std::size_t>::iterator endit = v.end();
    while ( i != endit && (*i) != candidate)
    {
        ++i;
    }
    if( i == endit )
    {
        v.push_back(candidate);
    }
}

//**********AUXILIARY
FUNCTION************************************************************** //****************************************************************************************** inline void CreatePartition(unsigned int number_of_threads,const int number_of_rows, vector<unsigned int>& partitions) { partitions.resize(number_of_threads+1); int partition_size = number_of_rows / number_of_threads; partitions[0] = 0; partitions[number_of_threads] = number_of_rows; for(unsigned int i = 1; i<number_of_threads; i++) partitions[i] = partitions[i-1] + partition_size ; } private: int mEchoLevel; };//Class Scheme }//namespace Kratos. #endif /* KRATOS_VARIABLE_TRANSFER_UTILITY defined */
version2_1.c
// Compile with: // // // To specify the number of bodies in the world, the program optionally accepts // an integer as its first command line argument. #include <time.h> #include <sys/times.h> #include <math.h> #include <string.h> #include <stdlib.h> #include <stdio.h> #include <X11/Xlib.h> #include <unistd.h> #include "omp.h" #define WIDTH 1024 #define HEIGHT 768 // default number of bodies #define DEF_NUM_BODIES 200 // gravitational constant #define GRAV 10.0 // initial velocities are scaled by this value #define V_SCALAR 20.0 // initial masses are scaled by this value #define M_SCALAR 5.0 // radius scalar #define R_SCALAR 3 // coefficient of restitution determines the elasticity of a collision: C_REST = [0,1] // if C_REST = 0 -> perfectly inelastic (particles stick together) // if C_REST = 1 -> perfectly elastic (no loss of speed) #define C_REST 0.5 // set the iteration times #define iteration_times 100 // Must set 0 if run on Pi #define NOT_RUN_ON_PI 1 // World Buffer #define ORIGINAL_WORLD 0 #define BACK_WORLD 1 struct body { double x, y; // position double vx, vy; // velocity double m; // mass double r; // radius of the particle double x_back, y_back; }; struct world { struct body *bodies; int num_bodies; }; clock_t total_time = 0; //total_time.sec = 0; //total_time.usec = 0; /* This function initializes each particle's mass, velocity and position */ struct world *create_world(int num_bodies) { struct world *world = malloc(sizeof(struct world)); world->num_bodies = num_bodies; world->bodies = malloc(sizeof(struct body) * num_bodies); int i = 0; double x; double y; double rc; int min_dim = (WIDTH < HEIGHT) ? 
WIDTH : HEIGHT; while (i < num_bodies) { x = drand48() * WIDTH; y = drand48() * HEIGHT; rc = sqrt((WIDTH / 2 - x) * (WIDTH / 2 - x) + (y - HEIGHT / 2) * (y - HEIGHT / 2)); if (rc <= min_dim / 2) { world->bodies[i].x = x; world->bodies[i].y = y; world->bodies[i].x_back = x; world->bodies[i].y_back = y; world->bodies[i].vx = V_SCALAR * (y - HEIGHT / 2) / rc; world->bodies[i].vy = V_SCALAR * (WIDTH / 2 - x) / rc; world->bodies[i].m = (1 / (0.025 + drand48())) * M_SCALAR; world->bodies[i].r = sqrt(world->bodies[i].m / M_PI) * R_SCALAR; i++; } } return world; } // set the foreground color given RGB values between 0..255. void set_color(Display *disp, GC gc, int r, int g, int b) { unsigned long int p; if (r < 0) r = 0; else if (r > 255) r = 255; if (g < 0) g = 0; else if (g > 255) g = 255; if (b < 0) b = 0; else if (b > 255) b = 255; p = (r << 16) | (g << 8) | (b); XSetForeground(disp, gc, p); } /* This function updates the screen with the new positions of each particle */ void draw_world(Display *disp, Pixmap back_buf, GC gc, struct world *world) { int i; double x, y, r, r2; // we turn off aliasing for faster draws set_color(disp, gc, 255, 255, 255); XFillRectangle(disp, back_buf, gc, 0, 0, WIDTH, HEIGHT); for (i = 0; i < world->num_bodies; i++) { r = world->bodies[i].r; x = world->bodies[i].x - r; y = world->bodies[i].y - r; r2 = r + r; // draw body set_color(disp, gc, 255 * 7 / 10, 255 * 7 / 10, 255 * 7 / 10); XFillArc(disp, back_buf, gc, x, y, r2, r2, 0, 360 * 64); set_color(disp, gc, 0, 0, 0); XDrawArc(disp, back_buf, gc, x, y, r2, r2, 0, 360 * 64); } } void collision_step(struct world *world) { int a, b; double r, x, y, vx, vy; // Impose screen boundaries by reversing direction if body is off screen for (a = 0; a < world->num_bodies; a++) { r = world->bodies[a].r; x = world->bodies[a].x; y = world->bodies[a].y; vx = world->bodies[a].vx; vy = world->bodies[a].vy; if (x - r < 0) { // left edge if (vx < 0) { world->bodies[a].vx = -C_REST * vx; } world->bodies[a].x = 
r; } else if (x + r > WIDTH) { // right edge if (vx > 0) { world->bodies[a].vx = -C_REST * vx; } world->bodies[a].x = WIDTH - r; } if (y - r < 0) { // bottom edge if (vy < 0) { world->bodies[a].vy = -C_REST * vy; } world->bodies[a].y = r; } else if (y + r > HEIGHT) { // top edge if (vy > 0) { world->bodies[a].vy = -C_REST * vy; } world->bodies[a].y = HEIGHT - r; } } } void position_step(struct world *world, double time_res,int iteration) { int i, j; /* The forces array stores the x and y components of the total force acting * on each body. The forces are index like this: * F on body i in the x dir = F_x[i] * F on body i in the y dir = F_y[i] */ double *force_x = (double *) malloc(sizeof(double) * world->num_bodies); double *force_y = (double *) malloc(sizeof(double) * world->num_bodies); // initialize all forces to zero force_x = memset(force_x, 0, sizeof(double) * world->num_bodies); force_y = memset(force_y, 0, sizeof(double) * world->num_bodies); /* Compute the net force on each body */ #pragma omp parallel { #pragma omp for for (int i = 0; i < world->num_bodies; i++) { double d, d_cubed, diff_x, diff_y; for (int j = 0; j < world->num_bodies; j++) { if (i == j) { continue; } // Compute the x and y distances and total distance d between // bodies i and j if(iteration%2==0){ diff_x = world->bodies[j].x - world->bodies[i].x; diff_y = world->bodies[j].y - world->bodies[i].y; } else{ diff_x = world->bodies[j].x_back - world->bodies[i].x_back; diff_y = world->bodies[j].y_back - world->bodies[i].y_back; } d = sqrt((diff_x * diff_x) + (diff_y * diff_y)); if (d < 25) { d = 25; } d_cubed = d * d * d; // Add force due to j to total force on i force_x[i] += GRAV * (world->bodies[i].m * world->bodies[j].m / d_cubed) * diff_x; force_y[i] += GRAV * (world->bodies[i].m * world->bodies[j].m / d_cubed) * diff_y; } world->bodies[i].vx += force_x[i] * time_res / world->bodies[i].m; world->bodies[i].vy += force_y[i] * time_res / world->bodies[i].m; if (iteration%2== 0) { // Update 
positions world->bodies[i].x_back = world->bodies[i].x + world->bodies[i].vx * time_res; world->bodies[i].y_back = world->bodies[i].y + world->bodies[i].vy * time_res; } else { world->bodies[i].x = world->bodies[i].x_back + world->bodies[i].vx * time_res; world->bodies[i].y = world->bodies[i].y_back + world->bodies[i].vy * time_res; } } } } void step_world(struct world *world, double time_res,int iteration) { struct tms ttt; clock_t start, end; start = times(&ttt); position_step(world, time_res,iteration); end = times(&ttt); total_time += end - start; collision_step(world); } /* Main method runs initialize() and update() */ int main(int argc, char **argv) { //total_time.tv_sec = 0; //total_time.tv_usec = 0; /* get num bodies from the command line */ int num_bodies, threads; num_bodies = DEF_NUM_BODIES; threads = 1; if (argc == 2) { num_bodies = atoi(argv[1]); }; int thread_list[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; FILE *fstream = fopen("outdata", "a+"); fprintf(fstream, "Universe has %d bodies\n", num_bodies); for (int i = 0; i < 10; ++i) { threads = thread_list[i]; printf("Universe has %d bodies. 
%d Threads\n", num_bodies, threads); omp_set_num_threads(threads); /* set up the universe */ time_t cur_time; time(&cur_time); srand48((long) cur_time); // seed the RNG used in create_world struct world *world = create_world(num_bodies); /* set up graphics using Xlib */ #if NOT_RUN_ON_PI Display *disp = XOpenDisplay(NULL); int scr = DefaultScreen(disp); Window win = XCreateSimpleWindow( disp, RootWindow(disp, scr), 0, 0, WIDTH, HEIGHT, 0, BlackPixel(disp, scr), WhitePixel(disp, scr)); XStoreName(disp, win, "N-Body Simulator"); Pixmap back_buf = XCreatePixmap(disp, RootWindow(disp, scr), WIDTH, HEIGHT, DefaultDepth(disp, scr)); GC gc = XCreateGC(disp, back_buf, 0, 0); // Make sure we're only looking for messages about closing the window Atom del_window = XInternAtom(disp, "WM_DELETE_WINDOW", 0); XSetWMProtocols(disp, win, &del_window, 1); XSelectInput(disp, win, StructureNotifyMask); XMapWindow(disp, win); XEvent event; // wait until window is mapped while (1) { XNextEvent(disp, &event); if (event.type == MapNotify) { break; } } #endif struct timespec delay = {0, 1000000000 / 60}; // for 60 FPS struct timespec remaining; double delta_t = 0.1; int ii; total_time = 0; for (ii = 0; ii < iteration_times; ii++) { // check if the window has been closed #if NOT_RUN_ON_PI if (XCheckTypedEvent(disp, ClientMessage, &event)) { break; } // we first draw to the back buffer then copy it to the front (`win`) draw_world(disp, back_buf, gc, world); XCopyArea(disp, back_buf, win, gc, 0, 0, WIDTH, HEIGHT, 0, 0); #endif step_world(world, delta_t,ii); //if you want to watch the process in 60 FPS //nanosleep(&delay, &remaining); } // printf("Total Time = %f\n", (double)total_time.tv_sec + (double)total_time.tv_usec/1000000); fprintf(fstream, "%d %lfs\n", threads, (double) total_time / (sysconf(_SC_CLK_TCK))); #if NOT_RUN_ON_PI XFreeGC(disp, gc); XFreePixmap(disp, back_buf); XDestroyWindow(disp, win); XCloseDisplay(disp); #endif } fclose(fstream); return 0; }
yolov2_forward_network_quantized.c
#include "additionally.h"    // some definitions from: im2col.h, blas.h, list.h, utils.h, activations.h, tree.h, layer.h, network.h
// softmax_layer.h, reorg_layer.h, route_layer.h, region_layer.h, maxpool_layer.h, convolutional_layer.h

#define GEMMCONV

//#define SSE41
//#undef AVX

// Saturation limits for the quantized accumulators (1 bit reserved for sign).
#define MAX_VAL_8 (256/2 - 1)            // 7-bit (1-bit sign)
#define MAX_VAL_16 (256*256/2 - 1)       // 15-bit (1-bit sign)
// BUG FIX: was (256*256*256*256/2 - 1). 256^4 = 2^32 does not fit in a
// 32-bit int, so the constant expression overflowed at compile time
// (typically wrapping to 0, making the macro evaluate to -1 and breaking
// max_abs() clamping in the int32 GEMM). Use INT32_MAX explicitly.
#define MAX_VAL_32 (2147483647)          // 31-bit (1-bit sign)

// Clamp src into [-max_val, max_val] (symmetric saturation).
// Expects max_val > 0; the sign of src is preserved.
int max_abs(int src, int max_val)
{
    if (abs(src) > abs(max_val)) src = (src > 0) ? max_val : -max_val;
    return src;
}

// 16-bit variant of max_abs (same symmetric saturation semantics).
short int max_abs_short(short int src, short int max_val)
{
    if (abs(src) > abs(max_val)) src = (src > 0) ? max_val : -max_val;
    return src;
}

// im2col.c
// Fetch one input pixel for im2col, honoring padding: coordinates that fall
// outside the image (after subtracting pad) read as 0.
int8_t im2col_get_pixel_int8(int8_t *im, int height, int width, int channels,
    int row, int col, int channel, int pad)
{
    row -= pad;
    col -= pad;

    if (row < 0 || col < 0 || row >= height || col >= width) return 0;
    return im[col + width*(row + height*channel)];
}

// im2col.c
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu_int8(int8_t* data_im, int channels, int height, int width, int ksize, int stride, int pad, int8_t* data_col) { int c, h, w; int height_col = (height + 2 * pad - ksize) / stride + 1; int width_col = (width + 2 * pad - ksize) / stride + 1; int channels_col = channels * ksize * ksize; for (c = 0; c < channels_col; ++c) { int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = 0; h < height_col; ++h) { for (w = 0; w < width_col; ++w) { int im_row = h_offset + h * stride; int im_col = w_offset + w * stride; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel_int8(data_im, height, width, channels, im_row, im_col, c_im, pad); } } } } void gemm_nn_int8_int16(int M, int N, int K, int8_t ALPHA, int8_t *A, int lda, int8_t *B, int ldb, int16_t *C, int ldc) { int32_t *c_tmp = calloc(N, sizeof(int32_t)); int i, j, k; for (i = 0; i < M; ++i) { for (k = 0; k < K; ++k) { register int16_t A_PART = ALPHA*A[i*lda + k]; //#pragma simd parallel for for (j = 0; j < N; ++j) { c_tmp[j] += A_PART*B[k*ldb + j]; } } for (j = 0; j < N; ++j) { C[i*ldc + j] += max_abs(c_tmp[j], MAX_VAL_16); c_tmp[j] = 0; } } free(c_tmp); } void gemm_nn_int8_int32(int M, int N, int K, int8_t ALPHA, int8_t *A, int lda, int8_t *B, int ldb, int32_t *C, int ldc) { int32_t *c_tmp = calloc(N, sizeof(int32_t)); int i, j, k; for (i = 0; i < M; ++i) { for (k = 0; k < K; ++k) { register int16_t A_PART = ALPHA*A[i*lda + k]; //#pragma simd parallel for for (j = 0; j < N; ++j) { c_tmp[j] += A_PART*B[k*ldb + j]; } } for (j = 0; j < N; ++j) { C[i*ldc + j] += max_abs(c_tmp[j], MAX_VAL_32); c_tmp[j] = 0; } } free(c_tmp); } void forward_convolutional_layer_q(layer l, network_state state) { int out_h = (l.h + 2 * l.pad - l.size) / l.stride + 1; // output_height=input_height for stride=1 and pad=1 int out_w = (l.w + 2 * l.pad - l.size) / l.stride + 1; // output_width=input_width 
for stride=1 and pad=1 int i, j; int const out_size = out_h*out_w; typedef int16_t conv_t; // l.output conv_t *output_q = calloc(l.outputs, sizeof(conv_t)); state.input_int8 = (int8_t *)calloc(l.inputs, sizeof(int)); int z; for (z = 0; z < l.inputs; ++z) { int16_t src = state.input[z] * l.input_quant_multiplier; state.input_int8[z] = max_abs(src, MAX_VAL_8); } // Convolution int m = l.n; int k = l.size*l.size*l.c; int n = out_h*out_w; int8_t *a = l.weights_int8; int8_t *b = (int8_t *)state.workspace; conv_t *c = output_q; // int16_t // Use GEMM (as part of BLAS) im2col_cpu_int8(state.input_int8, l.c, l.h, l.w, l.size, l.stride, l.pad, b); int t; // multi-thread gemm #pragma omp parallel for for (t = 0; t < m; ++t) { gemm_nn_int8_int16(1, n, k, 1, a + t*k, k, b, n, c + t*n, n); } free(state.input_int8); // Bias addition int fil; for (fil = 0; fil < l.n; ++fil) { for (j = 0; j < out_size; ++j) { output_q[fil*out_size + j] = output_q[fil*out_size + j] + l.biases_quant[fil]; } } // Activation if (l.activation == LEAKY) { for (i = 0; i < l.n*out_size; ++i) { output_q[i] = (output_q[i] > 0) ? 
output_q[i] : output_q[i] / 10; } } // De-scaling float ALPHA1 = 1 / (l.input_quant_multiplier * l.weights_quant_multiplier); for (i = 0; i < l.outputs; ++i) { l.output[i] = output_q[i] * ALPHA1; } free(output_q); } void yolov2_forward_network_q(network net, network_state state) { state.workspace = net.workspace; int i; for (i = 0; i < net.n; ++i) { state.index = i; layer l = net.layers[i]; if (l.type == CONVOLUTIONAL) { forward_convolutional_layer_q(l, state); } else if (l.type == MAXPOOL) { forward_maxpool_layer_cpu(l, state); } else if (l.type == ROUTE) { forward_route_layer_cpu(l, state); } else if (l.type == REORG) { forward_reorg_layer_cpu(l, state); } else if (l.type == UPSAMPLE) { forward_upsample_layer_cpu(l, state); } else if (l.type == SHORTCUT) { forward_shortcut_layer_cpu(l, state); } else if (l.type == YOLO) { forward_yolo_layer_cpu(l, state); } else if (l.type == REGION) { forward_region_layer_cpu(l, state); } else { printf("\n layer: %d \n", l.type); } state.input = l.output; } } // detect on CPU float *network_predict_quantized(network net, float *input) { network_state state; state.net = net; state.index = 0; state.input = input; state.truth = 0; state.train = 0; state.delta = 0; yolov2_forward_network_q(net, state); // network on CPU //float *out = get_network_output(net); int i; for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break; return net.layers[i].output; } /* Quantization-related */ void do_quantization(network net) { int counter = 0; int j; for (j = 0; j < net.n; ++j) { layer *l = &net.layers[j]; /* TODO: implement quantization The implementation given below is a naive version of per-network quantization; implement your own quantization that minimizes the mAP degradation */ printf("\n"); if (l->type == CONVOLUTIONAL) { // Quantize conv layer only size_t const weights_size = l->size*l->size*l->c*l->n; size_t const filter_size = l->size*l->size*l->c; int i, fil; // Input Scaling if (counter >= net.input_calibration_size) { 
printf(" Warning: CONV%d has no corresponding input_calibration parameter - default value 16 will be used;\n", j); } l->input_quant_multiplier = (counter < net.input_calibration_size) ? net.input_calibration[counter] : 16; // Using 16 as input_calibration as default value // l->input_quant_multiplier = floor(l->input_quant_multiplier*pow(2,12))/pow(2,12); ++counter; // Weight Quantization l->weights_quant_multiplier = 32; // Arbitrarily set to 32; you should devise your own method to calculate the weight multiplier for (fil = 0; fil < l->n; ++fil) { for (i = 0; i < filter_size; ++i) { float w = l->weights[fil*filter_size + i] * l->weights_quant_multiplier; // Scale l->weights_int8[fil*filter_size + i] = max_abs(w, MAX_VAL_8); // Clip } } // Bias Quantization float biases_multiplier = (l->weights_quant_multiplier * l->input_quant_multiplier); for (fil = 0; fil < l->n; ++fil) { float b = l->biases[fil] * biases_multiplier; // Scale l->biases_quant[fil] = max_abs(b, MAX_VAL_16); // Clip } printf(" CONV%d multipliers: input %g, weights %g, bias %g \n", j, l->input_quant_multiplier, l->weights_quant_multiplier, biases_multiplier); } else { printf(" No quantization for layer %d (layer type: %d) \n", j, l->type); } } } // Save quantized weights, bias, and scale void save_quantized_model(network net) { int j; for (j = 0; j < net.n; ++j) { layer *l = &net.layers[j]; if (l->type == CONVOLUTIONAL) { size_t const weights_size = l->size*l->size*l->c*l->n; size_t const filter_size = l->size*l->size*l->c; printf(" Saving quantized weights, bias, and scale for CONV%d \n", j); char weightfile[30]; char biasfile[30]; char scalefile[30]; sprintf(weightfile, "weights/CONV%d_W.txt", j); sprintf(biasfile, "weights/CONV%d_B.txt", j); sprintf(scalefile, "weights/CONV%d_S.txt", j); int k; FILE *fp_w = fopen(weightfile, "w"); for (k = 0; k < weights_size; k = k + 4) { uint8_t first = k < weights_size ? l->weights_int8[k] : 0; uint8_t second = k+1 < weights_size ? 
l->weights_int8[k+1] : 0; uint8_t third = k+2 < weights_size ? l->weights_int8[k+2] : 0; uint8_t fourth = k+3 < weights_size ? l->weights_int8[k+3] : 0; fprintf(fp_w, "%02x%02x%02x%02x\n", first, second, third, fourth); } fclose(fp_w); FILE *fp_b = fopen(biasfile, "w"); for (k = 0; k < l->n; k = k + 4) { uint16_t first = k < l->n ? l->biases_quant[k] : 0; uint16_t second = k+1 < l->n ? l->biases_quant[k+1] : 0; fprintf(fp_b, "%04x%04x\n", first, second); } fclose(fp_b); FILE *fp_s = fopen(scalefile, "w"); fprintf(fp_s, "%f\n", l->input_quant_multiplier); fclose(fp_s); } } }
DRACC_OMP_049_MxV_missing_free_other.c
/* Matrix Vector multiplication without deallocating the matrix "b" after the kernel finishes. */ #include <stdio.h> #include <stdbool.h> #include <stdlib.h> #define C 512 int *a; int *b; int *c; int init(){ for(int i=0; i<C; i++){ for(int j=0; j<C; j++){ b[j+i*C]=1; } a[i]=1; c[i]=0; } return 0; } int Mult(){ #pragma omp target enter data map(to:a[0:C],b[0:C*C]) map(alloc:c[0:C]) device(0) #pragma omp target device(0) { #pragma omp teams distribute parallel for for(int i=0; i<C; i++){ for(int j=0; j<C; j++){ c[i]+=b[j+i*C]*a[j]; } } } #pragma omp target exit data map(from:c[0:C]) map(release:a[0:C]) device(0) return 0; } int check(){ bool test = false; for(int i=0; i<C; i++){ if(c[i]!=C){ test = true; } } printf("Memory Access Issue visible: %s\n",test ? "true" : "false"); return 0; } int main(){ a = malloc(C*sizeof(int)); b = malloc(C*C*sizeof(int)); c = malloc(C*sizeof(int)); init(); Mult(); check(); free(a); free(b); free(c); return 0; }
gbdt.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_BOOSTING_GBDT_H_ #define LIGHTGBM_BOOSTING_GBDT_H_ #include <LightGBM/boosting.h> #include <LightGBM/objective_function.h> #include <LightGBM/prediction_early_stop.h> #include <LightGBM/cuda/vector_cudahost.h> #include <LightGBM/utils/json11.h> #include <LightGBM/utils/threading.h> #include <string> #include <algorithm> #include <cstdio> #include <fstream> #include <map> #include <memory> #include <mutex> #include <unordered_map> #include <utility> #include <vector> #include "score_updater.hpp" namespace LightGBM { using json11::Json; /*! * \brief GBDT algorithm implementation. including Training, prediction, bagging. */ class GBDT : public GBDTBase { public: /*! * \brief Constructor */ GBDT(); /*! * \brief Destructor */ ~GBDT(); /*! * \brief Initialization logic * \param gbdt_config Config for boosting * \param train_data Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void Init(const Config* gbdt_config, const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! * \brief Merge model from other boosting object. 
Will insert to the front of current boosting object * \param other */ void MergeFrom(const Boosting* other) override { auto other_gbdt = reinterpret_cast<const GBDT*>(other); // tmp move to other vector auto original_models = std::move(models_); models_ = std::vector<std::unique_ptr<Tree>>(); // push model from other first for (const auto& tree : other_gbdt->models_) { auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get()))); models_.push_back(std::move(new_tree)); } num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; // push model in current object for (const auto& tree : original_models) { auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get()))); models_.push_back(std::move(new_tree)); } num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; } void ShuffleModels(int start_iter, int end_iter) override { int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_; start_iter = std::max(0, start_iter); if (end_iter <= 0) { end_iter = total_iter; } end_iter = std::min(total_iter, end_iter); auto original_models = std::move(models_); std::vector<int> indices(total_iter); for (int i = 0; i < total_iter; ++i) { indices[i] = i; } Random tmp_rand(17); for (int i = start_iter; i < end_iter - 1; ++i) { int j = tmp_rand.NextShort(i + 1, end_iter); std::swap(indices[i], indices[j]); } models_ = std::vector<std::unique_ptr<Tree>>(); for (int i = 0; i < total_iter; ++i) { for (int j = 0; j < num_tree_per_iteration_; ++j) { int tree_idx = indices[i] * num_tree_per_iteration_ + j; auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get()))); models_.push_back(std::move(new_tree)); } } } /*! 
* \brief Reset the training data * \param train_data New Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! * \brief Reset Boosting Config * \param gbdt_config Config for boosting */ void ResetConfig(const Config* gbdt_config) override; /*! * \brief Adding a validation dataset * \param valid_data Validation dataset * \param valid_metrics Metrics for validation dataset */ void AddValidDataset(const Dataset* valid_data, const std::vector<const Metric*>& valid_metrics) override; /*! * \brief Perform a full training procedure * \param snapshot_freq frequency of snapshot * \param model_output_path path of model file */ void Train(int snapshot_freq, const std::string& model_output_path) override; void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override; /*! * \brief Training logic * \param gradients nullptr for using default objective, otherwise use self-defined boosting * \param hessians nullptr for using default objective, otherwise use self-defined boosting * \return True if cannot train any more */ bool TrainOneIter(const score_t* gradients, const score_t* hessians) override; /*! * \brief Rollback one iteration */ void RollbackOneIter() override; /*! * \brief Get current iteration */ int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; } /*! * \brief Can use early stopping for prediction or not * \return True if cannot use early stopping for prediction */ bool NeedAccuratePrediction() const override { if (objective_function_ == nullptr) { return true; } else { return objective_function_->NeedAccuratePrediction(); } } /*! 
* \brief Get evaluation result at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \return evaluation result */ std::vector<double> GetEvalAt(int data_idx) const override; /*! * \brief Get current training score * \param out_len length of returned score * \return training score */ const double* GetTrainingScore(int64_t* out_len) override; /*! * \brief Get size of prediction at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \return The size of prediction */ int64_t GetNumPredictAt(int data_idx) const override { CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size())); data_size_t num_data = train_data_->num_data(); if (data_idx > 0) { num_data = valid_score_updater_[data_idx - 1]->num_data(); } return static_cast<int64_t>(num_data) * num_class_; } /*! * \brief Get prediction result at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \param result used to store prediction result, should allocate memory before call this function * \param out_len length of returned score */ void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override; /*! 
* \brief Get number of prediction for one data * \param start_iteration Start index of the iteration to predict * \param num_iteration number of used iterations * \param is_pred_leaf True if predicting leaf index * \param is_pred_contrib True if predicting feature contribution * \return number of prediction */ inline int NumPredictOneRow(int start_iteration, int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override { int num_pred_in_one_row = num_class_; if (is_pred_leaf) { int max_iteration = GetCurrentIteration(); start_iteration = std::max(start_iteration, 0); start_iteration = std::min(start_iteration, max_iteration); if (num_iteration > 0) { num_pred_in_one_row *= static_cast<int>(std::min(max_iteration - start_iteration, num_iteration)); } else { num_pred_in_one_row *= (max_iteration - start_iteration); } } else if (is_pred_contrib) { num_pred_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2); // +1 for 0-based indexing, +1 for baseline } return num_pred_in_one_row; } void PredictRaw(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; void PredictRawByMap(const std::unordered_map<int, double>& features, double* output, const PredictionEarlyStopInstance* early_stop) const override; void Predict(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; void PredictByMap(const std::unordered_map<int, double>& features, double* output, const PredictionEarlyStopInstance* early_stop) const override; void PredictLeafIndex(const double* features, double* output) const override; void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override; void PredictContrib(const double* features, double* output) const override; void PredictContribByMap(const std::unordered_map<int, double>& features, std::vector<std::unordered_map<int, double>>* output) const override; /*! 
  * \brief Dump model to json format string
  * \param start_iteration Index of the first iteration to dump
  * \param num_iteration Number of iterations to dump, -1 means dump all
  * \param feature_importance_type Type of feature importance, 0: split, 1: gain
  * \return Json format string of model
  */
  std::string DumpModel(int start_iteration, int num_iteration, int feature_importance_type) const override;

  /*!
  * \brief Translate model to if-else statement code
  * \param num_iteration Number of iterations to translate, -1 means translate all
  * \return if-else format codes of model
  */
  std::string ModelToIfElse(int num_iteration) const override;

  /*!
  * \brief Translate model to if-else statement code and save it to a file
  * \param num_iteration Number of iterations to translate, -1 means translate all
  * \param filename Filename to save to
  * \return true if saving succeeded
  */
  bool SaveModelToIfElse(int num_iteration, const char* filename) const override;

  /*!
  * \brief Save model to file
  * \param start_iteration Index of the first iteration to save
  * \param num_iterations Number of iterations to save, -1 means save all
  * \param feature_importance_type Type of feature importance, 0: split, 1: gain
  * \param filename Filename to save to
  * \return true if saving succeeded
  */
  bool SaveModelToFile(int start_iteration, int num_iterations, int feature_importance_type, const char* filename) const override;

  /*!
  * \brief Save model to string
  * \param start_iteration Index of the first iteration to save
  * \param num_iterations Number of iterations to save, -1 means save all
  * \param feature_importance_type Type of feature importance, 0: split, 1: gain
  * \return Non-empty string if succeeded
  */
  std::string SaveModelToString(int start_iteration, int num_iterations, int feature_importance_type) const override;

  /*!
  * \brief Restore the model from a serialized buffer
  * \param buffer Serialized model bytes
  * \param len Number of bytes in buffer
  * \return true if the model was restored successfully
  */
  bool LoadModelFromString(const char* buffer, size_t len) override;

  /*!
  * \brief Calculate feature importances
  * \param num_iteration Number of iterations to use, -1 means use all
  * \param importance_type 0 for split, 1 for gain
  * \return vector of per-feature importance values
  */
  std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override;

  /*!
  * \brief Calculate upper bound value of the model output
  * \return upper bound value
  */
  double GetUpperBoundValue() const override;

  /*!
  * \brief Calculate lower bound value of the model output
  * \return lower bound value
  */
  double GetLowerBoundValue() const override;

  /*!
  * \brief Get max feature index of this model
  * \return Max feature index of this model
  */
  inline int MaxFeatureIdx() const override { return max_feature_idx_; }

  /*!
  * \brief Get feature names of this model
  * \return Feature names of this model
  */
  inline std::vector<std::string> FeatureNames() const override { return feature_names_; }

  /*!
  * \brief Get index of label column
  * \return index of label column
  */
  inline int LabelIdx() const override { return label_idx_; }

  /*!
  * \brief Get total number of weak sub-models (trees) over all iterations
  * \return Number of weak sub-models
  */
  inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); }

  /*!
  * \brief Get number of trees per iteration
  * \return number of trees per iteration
  */
  inline int NumModelPerIteration() const override { return num_tree_per_iteration_; }

  /*!
  * \brief Get number of classes
  * \return Number of classes
  */
  inline int NumberOfClasses() const override { return num_class_; }

  /*!
  * \brief Select the iteration window [start_iteration, start_iteration + n)
  *        used by later prediction calls, clamped to the trained range.
  */
  inline void InitPredict(int start_iteration, int num_iteration, bool is_pred_contrib) override {
    // total number of complete iterations currently stored
    num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    // clamp the requested start into [0, total]
    start_iteration = std::max(start_iteration, 0);
    start_iteration = std::min(start_iteration, num_iteration_for_pred_);
    if (num_iteration > 0) {
      num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_ - start_iteration);
    } else {
      // num_iteration <= 0 means "use every remaining iteration"
      num_iteration_for_pred_ = num_iteration_for_pred_ - start_iteration;
    }
    start_iteration_for_pred_ = start_iteration;
    if (is_pred_contrib) {
      // contribution (pred_contrib) prediction needs each tree's max depth refreshed
      #pragma omp parallel for schedule(static)
      for (int i = 0; i < static_cast<int>(models_.size()); ++i) {
        models_[i]->RecomputeMaxDepth();
      }
    }
  }

  // Read the output value of one leaf; indices are validated with CHECK.
  inline double GetLeafValue(int tree_idx, int leaf_idx) const override {
    CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
    CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
    return models_[tree_idx]->LeafOutput(leaf_idx);
  }

  // Overwrite the output value of one leaf; indices are validated with CHECK.
  inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override {
    CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
    CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
    models_[tree_idx]->SetLeafOutput(leaf_idx, val);
  }

  /*!
  * \brief Get Type name of this boosting object
  */
  const char* SubModelName() const override { return "tree"; }

  // Returns the linear-tree flag stored in this model.
  bool IsLinear() const override { return linear_tree_; }

  // Raw parser config file content kept with the model (may be empty).
  inline std::string ParserConfigStr() const override {return parser_config_str_;}

 protected:
  // True when the objective function reports a constant hessian;
  // false when no objective function is set.
  virtual bool GetIsConstHessian(const ObjectiveFunction* objective_function) {
    if (objective_function != nullptr) {
      return objective_function->IsConstantHessian();
    } else {
      return false;
    }
  }

  /*!
  * \brief Print eval result and check early stopping
  */
  virtual bool EvalAndCheckEarlyStopping();

  /*!
  * \brief Reset config for bagging
  * \param config New config
  * \param is_change_dataset True if the training dataset was changed as well
  */
  void ResetBaggingConfig(const Config* config, bool is_change_dataset);

  /*!
  * \brief Implement bagging logic
  * \param iter Current iteration
  */
  virtual void Bagging(int iter);

  // Sample the row range [start, start + cnt); writes sampled indices to
  // buffer and returns the number of in-bag rows.
  virtual data_size_t BaggingHelper(data_size_t start, data_size_t cnt, data_size_t* buffer);

  // Bagging variant used when balanced bagging is enabled.
  data_size_t BalancedBaggingHelper(data_size_t start, data_size_t cnt, data_size_t* buffer);

  /*!
  * \brief Calculate the objective function
  */
  virtual void Boosting();

  /*!
  * \brief Update score after a tree was trained
  * \param tree Trained tree of this iteration
  * \param cur_tree_id Current tree for multiclass training
  */
  virtual void UpdateScore(const Tree* tree, const int cur_tree_id);

  /*!
  * \brief Eval results for one metric
  */
  virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const;

  /*!
  * \brief Print metric result of current iteration
  * \param iter Current iteration
  * \return best_msg if met early_stopping
  */
  std::string OutputMetric(int iter);

  // Initial score for one class; presumably boosts from the label average —
  // TODO(review): confirm semantics against the implementation in gbdt.cpp.
  double BoostFromAverage(int class_id, bool update_scorer);

  /*! \brief current iteration */
  int iter_;
  /*! \brief Pointer to training data */
  const Dataset* train_data_;
  /*! \brief Config of gbdt */
  std::unique_ptr<Config> config_;
  /*! \brief Tree learner, will use this class to learn trees */
  std::unique_ptr<TreeLearner> tree_learner_;
  /*! \brief Objective function */
  const ObjectiveFunction* objective_function_;
  /*! \brief Store and update training data's score */
  std::unique_ptr<ScoreUpdater> train_score_updater_;
  /*! \brief Metrics for training data */
  std::vector<const Metric*> training_metrics_;
  /*! \brief Store and update validation data's scores */
  std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_;
  /*! \brief Metric for validation data */
  std::vector<std::vector<const Metric*>> valid_metrics_;
  /*! \brief Number of rounds for early stopping */
  int early_stopping_round_;
  /*! \brief Only use first metric for early stopping */
  bool es_first_metric_only_;
  /*! \brief Best iteration(s) for early stopping */
  std::vector<std::vector<int>> best_iter_;
  /*! \brief Best score(s) for early stopping */
  std::vector<std::vector<double>> best_score_;
  /*! \brief output message of best iteration */
  std::vector<std::vector<std::string>> best_msg_;
  /*! \brief Trained models(trees) */
  std::vector<std::unique_ptr<Tree>> models_;
  /*! \brief Max feature index of training data */
  int max_feature_idx_;
  /*! \brief Parser config file content */
  std::string parser_config_str_ = "";

#if defined(USE_CUDA) || defined(USE_CUDA_EXP)
  /*! \brief First order derivative of training data */
  std::vector<score_t, CHAllocator<score_t>> gradients_;
  /*! \brief Second order derivative of training data */
  std::vector<score_t, CHAllocator<score_t>> hessians_;
#else
  /*! \brief First order derivative of training data */
  std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> gradients_;
  /*! \brief Second order derivative of training data */
  std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> hessians_;
#endif

  /*! \brief Store the indices of in-bag data */
  std::vector<data_size_t, Common::AlignmentAllocator<data_size_t, kAlignedSize>> bag_data_indices_;
  /*! \brief Number of in-bag data */
  data_size_t bag_data_cnt_;
  /*! \brief Number of training data */
  data_size_t num_data_;
  /*! \brief Number of trees per iteration */
  int num_tree_per_iteration_;
  /*! \brief Number of classes */
  int num_class_;
  /*! \brief Index of label column */
  data_size_t label_idx_;
  /*! \brief number of used model */
  int num_iteration_for_pred_;
  /*! \brief Start iteration of used model */
  int start_iteration_for_pred_;
  /*! \brief Shrinkage rate for one iteration */
  double shrinkage_rate_;
  /*! \brief Number of loaded initial models */
  int num_init_iteration_;
  /*! \brief Feature names */
  std::vector<std::string> feature_names_;
  /*! \brief Feature info strings (presumably from the training dataset — TODO confirm) */
  std::vector<std::string> feature_infos_;
  /*! \brief Temporary dataset, presumably the bagging subset — TODO confirm */
  std::unique_ptr<Dataset> tmp_subset_;
  /*! \brief True if training uses tmp_subset_ instead of the full data */
  bool is_use_subset_;
  /*! \brief Per-class flag: whether this class still needs training */
  std::vector<bool> class_need_train_;
  /*! \brief True if the objective has a constant hessian */
  bool is_constant_hessian_;
  /*! \brief Objective function restored from a loaded model */
  std::unique_ptr<ObjectiveFunction> loaded_objective_;
  /*! \brief True if model outputs are averaged rather than summed — TODO confirm */
  bool average_output_;
  /*! \brief True if bagging must be redone before the next iteration */
  bool need_re_bagging_;
  /*! \brief True if balanced bagging is enabled */
  bool balanced_bagging_;
  /*! \brief Parameter string restored from a loaded model */
  std::string loaded_parameter_;
  /*! \brief Per-feature monotone constraints (presumably -1/0/+1 — TODO confirm) */
  std::vector<int8_t> monotone_constraints_;
  /*! \brief Block size used to partition rows for the bagging RNGs */
  const int bagging_rand_block_ = 1024;
  /*! \brief Random number generators used by bagging, one per block */
  std::vector<Random> bagging_rands_;
  /*! \brief Parallel runner used to partition rows during bagging */
  ParallelPartitionRunner<data_size_t, false> bagging_runner_;
  /*! \brief Forced splits loaded from JSON */
  Json forced_splits_json_;
  /*! \brief True if linear trees are used */
  bool linear_tree_;
};

}  // namespace LightGBM
#endif  // LightGBM_BOOSTING_GBDT_H_
ast-dump-openmp-parallel-for.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp parallel for for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp parallel for for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp parallel for collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp parallel for collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp parallel for collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-parallel-for.c:3:1, line:7:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1> // CHECK-NEXT: | `-OMPParallelForDirective {{.*}} <line:4:1, col:25> // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' 
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-for.c:4:1) *const restrict' // CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1> // CHECK-NEXT: | `-OMPParallelForDirective {{.*}} <line:10:1, col:25> // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: 
| | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-for.c:10:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1> // CHECK-NEXT: | `-OMPParallelForDirective {{.*}} <line:17:1, col:37> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:26, col:36> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:35> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:35> 'int' 1 // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | 
`-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-for.c:17:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1> // CHECK-NEXT: | `-OMPParallelForDirective {{.*}} <line:24:1, col:37> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:26, col:36> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:35> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:35> 'int' 2 // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | 
`-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-for.c:24:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1> // CHECK-NEXT: `-OMPParallelForDirective {{.*}} <line:31:1, col:37> // CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:26, col:36> // CHECK-NEXT: | `-ConstantExpr {{.*}} <col:35> 'int' // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:35> 'int' 2 // CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' 
<LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var 
{{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-for.c:31:1) *const restrict' // CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
GB_binop__land_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__land_int32 // A.*B function (eWiseMult): GB_AemultB__land_int32 // A*D function (colscale): GB_AxD__land_int32 // D*A function (rowscale): GB_DxB__land_int32 // C+=B function (dense accum): GB_Cdense_accumB__land_int32 // C+=b function (dense accum): GB_Cdense_accumb__land_int32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__land_int32 // C=scalar+B GB_bind1st__land_int32 // C=scalar+B' GB_bind1st_tran__land_int32 // C=A+scalar GB_bind2nd__land_int32 // C=A'+scalar GB_bind2nd_tran__land_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = ((aij != 0) && (bij != 0)) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] 
// NOTE(review): this excerpt is part of a machine-generated GraphBLAS kernel
// file for the binary operator LAND (logical AND) specialized to int32_t
// operands: every function below carries the __land_int32 suffix and GB_BINOP
// computes z = ((x != 0) && (y != 0)).  The numeric work is done by the
// #included template files; the macros here only bind the type and operator.

#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

// entry p of the output array Cx
#define GB_CX(p) Cx [p]

// binary operator: logical AND of the nonzero-ness of the two operands
#define GB_BINOP(z, x, y) \
    z = ((x != 0) && (y != 0)) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LAND || GxB_NO_INT32 || GxB_NO_LAND_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (excluded from compilation: LAND is not one of them, hence the
// placeholder name "(none)" emitted by the code generator.)

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Element-wise C = A (land) B with all three matrices dense.
GrB_Info GB_Cdense_ewise3_noaccum__land_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__land_int32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__land_int32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (the block above always returns); kept
    // byte-identical as emitted by the code generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__land_int32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__land_int32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__land_int32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__land_int32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__land_int32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int32_t bij = Bx [p] ;
        Cx [p] = ((x != 0) && (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__land_int32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int32_t aij = Ax [p] ;
        Cx [p] = ((aij != 0) && (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = Ax [pA] ;                     \
    Cx [pC] = ((x != 0) && (aij != 0)) ;        \
}

GrB_Info GB_bind1st_tran__land_int32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent kernels (generator boilerplate)
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = Ax [pA] ;                     \
    Cx [pC] = ((aij != 0) && (y != 0)) ;        \
}

GrB_Info GB_bind2nd_tran__land_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

// NOTE(review): closes a preprocessor conditional opened before this excerpt
// (likely the file-level #ifndef GBCOMPACT guard) — confirm against full file.
#endif
/* ==== file: dlook_ahead_update.c ==== */
/*! \file
Copyright (c) 2003, The Regents of the University of California, through
Lawrence Berkeley National Laboratory (subject to receipt of any required
approvals from U.S. Dept. of Energy)

All rights reserved.

The source code is distributed under BSD license, see the file License.txt
at the top-level directory.
*/

/************************************************************************/
/*! @file
 * \brief Look-ahead update of the Schur complement.
 *
 * <pre>
 * -- Distributed SuperLU routine (version 5.4) --
 * Lawrence Berkeley National Lab, Univ. of California Berkeley.
 * October 1, 2014
 *
 * Modified:
 *   September 18, 2017
 *   June 1, 2018  add parallel AWPM pivoting; add back arrive_at_ublock()
 *
 */

/* NOTE(review): this file is a code FRAGMENT, not a translation unit — it
 * contains bare statements and appears to be textually #included inside the
 * factorization driver's main loop.  All variables used here (iukp, rukp,
 * usub, lsub, grid, stat, ...) are declared by the including function;
 * confirm against the pdgstrf source before editing. */

#include <assert.h>  /* assertion doesn't work if NDEBUG is defined */

iukp = iukp0; /* point to the first block in index[] */
rukp = rukp0; /* point to the start of nzval[] */
j = jj0 = 0;  /* After the j-loop, jj0 points to the first block in U
                 outside look-ahead window. */

#if 0
for (jj = 0; jj < nub; ++jj) assert(perm_u[jj] == jj); /* Sherry */
#endif

/* Loop over the U(k,j) blocks whose destination column lies inside the
 * look-ahead window [k0, k0 + num_look_aheads]. */
#ifdef ISORT
while (j < nub && iperm_u[j] <= k0 + num_look_aheads)
#else
while (j < nub && perm_u[2 * j] <= k0 + num_look_aheads)
#endif
{
    double zero = 0.0;

#if 1
    /* Search is needed because a permutation perm_u is involved for j  */
    /* Search along the row for the pointers {iukp, rukp} pointing to
     * block U(k,j).
     * j    -- current block in look-ahead window, initialized to 0 on entry
     * iukp -- point to the start of index[] metadata
     * rukp -- point to the start of nzval[] array
     * jb   -- block number of block U(k,j), update destination column
     */
    arrive_at_ublock(
                     j, &iukp, &rukp, &jb, &ljb, &nsupc,
                     iukp0, rukp0, usub, perm_u, xsup, grid
                    );
#else
    jb = usub[iukp];
    ljb = LBj (jb, grid);     /* Local block number of U(k,j). */
    nsupc = SuperSize(jb);
    iukp += UB_DESCRIPTOR;    /* Start fstnz of block U(k,j). */
#endif

    j++;
    jj0++;
    jj = iukp;

    while (usub[jj] == klst) ++jj; /* Skip zero segments */

    ldu = klst - usub[jj++];
    ncols = 1;

    /* This loop computes ldu. */
    for (; jj < iukp + nsupc; ++jj) { /* for each column jj in block U(k,j) */
        segsize = klst - usub[jj];
        if (segsize) {
            ++ncols;
            if (segsize > ldu) ldu = segsize;
        }
    }
#if ( DEBUGlevel>=3 )
    ++num_update;
#endif

#if ( DEBUGlevel>=3 )
    printf ("(%d) k=%d,jb=%d,ldu=%d,ncols=%d,nsupc=%d\n",
            iam, k, jb, ldu, ncols, nsupc);
    ++num_copy;
#endif

    /* Now copy one block U(k,j) to bigU for GEMM, padding zeros up to ldu. */
    tempu = bigU; /* Copy one block U(k,j) to bigU for GEMM */
    for (jj = iukp; jj < iukp + nsupc; ++jj) {
        segsize = klst - usub[jj];
        if (segsize) {
            lead_zero = ldu - segsize;
            /* zero-pad the short segment so all columns have height ldu */
            for (i = 0; i < lead_zero; ++i) tempu[i] = zero;
            tempu += lead_zero;
            for (i = 0; i < segsize; ++i) {
                tempu[i] = uval[rukp + i];
            }
            rukp += segsize;
            tempu += segsize;
        }
    }
    tempu = bigU; /* set back to the beginning of the buffer */

    nbrow = lsub[1]; /* number of row subscripts in L(:,k) */
    if (myrow == krow) nbrow = lsub[1] - lsub[3]; /* skip diagonal block for those rows. */
    // double ttx =SuperLU_timer_();

    int current_b = 0; /* Each thread starts searching from first block.
                          This records the moving search target.           */
    lptr = lptr0; /* point to the start of index[] in supernode L(:,k) */
    luptr = luptr0;

#ifdef _OPENMP
    /* Sherry -- examine all the shared variables ??
       'firstprivate' ensures that the private variables are initialized
       to the values before entering the loop.  */
#pragma omp parallel for \
    firstprivate(lptr,luptr,ib,current_b) private(lb) \
    default(shared) schedule(dynamic)
#endif
    for (lb = 0; lb < nlb; lb++) { /* Loop through each block in L(:,k) */
        int temp_nbrow; /* automatic variable is private */

        /* Search for the L block that my thread will work on.
           No need to search from 0, can continue at the point where
           it is left from last iteration.
           Note: Blocks may not be sorted in L. Different thread picks up
           different lb.   */
        for (; current_b < lb; ++current_b) {
            temp_nbrow = lsub[lptr + 1];    /* Number of full rows. */
            lptr += LB_DESCRIPTOR;          /* Skip descriptor. */
            lptr += temp_nbrow;             /* move to next block */
            luptr += temp_nbrow;            /* move to next block */
        }

#ifdef _OPENMP
        int_t thread_id = omp_get_thread_num ();
#else
        int_t thread_id = 0;
#endif
        /* per-thread scratch areas inside the shared buffers */
        double * tempv = bigV + ldt*ldt*thread_id;

        int *indirect_thread = indirect + ldt * thread_id;
        int *indirect2_thread = indirect2 + ldt * thread_id;
        ib = lsub[lptr];                /* block number of L(i,k) */
        temp_nbrow = lsub[lptr + 1];    /* Number of full rows. */
        /* assert (temp_nbrow <= nbrow); */

        lptr += LB_DESCRIPTOR;  /* Skip descriptor. */

        /*if (thread_id == 0) tt_start = SuperLU_timer_();*/

        /* calling gemm */
        stat->ops[FACT] += 2.0 * (flops_t)temp_nbrow * ldu * ncols;
#if defined (USE_VENDOR_BLAS)
        dgemm_("N", "N", &temp_nbrow, &ncols, &ldu, &alpha,
               &lusup[luptr + (knsupc - ldu) * nsupr], &nsupr,
               tempu, &ldu, &beta, tempv, &temp_nbrow, 1, 1);
#else
        dgemm_("N", "N", &temp_nbrow, &ncols, &ldu, &alpha,
               &lusup[luptr + (knsupc - ldu) * nsupr], &nsupr,
               tempu, &ldu, &beta, tempv, &temp_nbrow );
#endif

#if 0
        if (thread_id == 0) {
            tt_end = SuperLU_timer_();
            LookAheadGEMMTimer += tt_end - tt_start;
            tt_start = tt_end;
        }
#endif
        /* Now scattering the output. */
        if (ib < jb) {          /* A(i,j) is in U. */
            dscatter_u (ib, jb,
                        nsupc, iukp, xsup,
                        klst, temp_nbrow,
                        lptr, temp_nbrow, lsub,
                        usub, tempv, Ufstnz_br_ptr, Unzval_br_ptr, grid);
        } else {                /* A(i,j) is in L. */
            dscatter_l (ib, ljb, nsupc, iukp, xsup, klst, temp_nbrow, lptr,
                        temp_nbrow, usub, lsub, tempv,
                        indirect_thread, indirect2_thread,
                        Lrowind_bc_ptr, Lnzval_bc_ptr, grid);
        }

        ++current_b;            /* Move to next block. */
        lptr += temp_nbrow;
        luptr += temp_nbrow;

#if 0
        if (thread_id == 0) {
            tt_end = SuperLU_timer_();
            LookAheadScatterTimer += tt_end - tt_start;
        }
#endif
    } /* end parallel for lb = 0, nlb ... all blocks in L(:,k) */

    iukp += nsupc; /* Mov to block U(k,j+1) */

    /* =========================================== *
     * == factorize L(:,j) and send if possible == *
     * =========================================== */
    kk = jb; /* destination column that is just updated */
    kcol = PCOL (kk, grid);
#ifdef ISORT
    kk0 = iperm_u[j - 1];
#else
    kk0 = perm_u[2 * (j - 1)];
#endif
    look_id = kk0 % (1 + num_look_aheads);

    if (look_ahead[kk] == k0 && kcol == mycol) {
        /* current column is the last dependency */
        look_id = kk0 % (1 + num_look_aheads);

        /* Factor diagonal and subdiagonal blocks and test for exact
           singularity.  */
        factored[kk] = 0;

        double tt1 = SuperLU_timer_();

        PDGSTRF2(options, kk0, kk, thresh, Glu_persist, grid, Llu,
                 U_diag_blk_send_req, tag_ub, stat, info);

        pdgstrf2_timer += SuperLU_timer_() - tt1;

        /* stat->time7 += SuperLU_timer_() - ttt1; */

        /* Multicasts numeric values of L(:,kk) to process rows. */
        send_req = send_reqs[look_id];
        msgcnt = msgcnts[look_id];

        lk = LBj (kk, grid);    /* Local block number. */
        lsub1 = Lrowind_bc_ptr[lk];
        lusup1 = Lnzval_bc_ptr[lk];
        if (lsub1) {
            msgcnt[0] = lsub1[1] + BC_HEADER + lsub1[0] * LB_DESCRIPTOR;
            msgcnt[1] = lsub1[1] * SuperSize (kk);
        } else {
            msgcnt[0] = 0;
            msgcnt[1] = 0;
        }

        scp = &grid->rscp;      /* The scope of process row. */
        for (pj = 0; pj < Pc; ++pj) {
            if (ToSendR[lk][pj] != EMPTY) {
#if ( PROFlevel>=1 )
                TIC (t1);
#endif
                MPI_Isend (lsub1, msgcnt[0], mpi_int_t, pj,
                           SLU_MPI_TAG (0, kk0) /* (4*kk0)%tag_ub */ ,
                           scp->comm, &send_req[pj]);
                MPI_Isend (lusup1, msgcnt[1], MPI_DOUBLE, pj,
                           SLU_MPI_TAG (1, kk0) /* (4*kk0+1)%tag_ub */ ,
                           scp->comm, &send_req[pj + Pc]);
#if ( PROFlevel>=1 )
                TOC (t2, t1);
                stat->utime[COMM] += t2;
                msg_cnt += 2;
                msg_vol += msgcnt[0] * iword + msgcnt[1] * dword;
#endif
#if ( DEBUGlevel>=2 )
                printf ("[%d] -2- Send L(:,%4d): #lsub %4d, #lusup %4d to Pj %2d, tags %d:%d \n",
                        iam, kk, msgcnt[0], msgcnt[1], pj,
                        SLU_MPI_TAG(0,kk0), SLU_MPI_TAG(1,kk0));
#endif
            } /* end if ( ToSendR[lk][pj] != EMPTY ) */
        } /* end for pj ... */
    } /* end if( look_ahead[kk] == k0 && kcol == mycol ) */
} /* end while j < nub and perm_u[j] <k0+NUM_LOOK_AHEAD */
/* ==== file: uni_plymc.h ==== */
/* * plymc.h * filter_plymc * * Created by Paolo Cignoni on 10/23/09. * Copyright 2009 ISTI - CNR. All rights reserved. * */ #ifndef __PLYMC_H__ #define __PLYMC_H__ #ifndef WIN32 #define _int64 long long #define __int64 long long #define __cdecl #endif #include <cstdio> #include <time.h> #include <float.h> #include <math.h> #include <locale> #include <iostream> //#include <tchar.h> #include <list> #include <limits> #include <vcg/space/index/grid_static_ptr.h> #include <vcg/simplex/vertex/base.h> #include <vcg/simplex/face/base.h> #include <vcg/complex/used_types.h> #include <vcg/complex/complex.h> #include <vcg/complex/algorithms/update/position.h> #include <vcg/complex/algorithms/update/normal.h> #include <vcg/complex/algorithms/update/quality.h> #include <vcg/complex/algorithms/update/edges.h> #include <vcg/complex/algorithms/update/topology.h> #include <vcg/complex/algorithms/update/flag.h> #include <vcg/complex/algorithms/update/bounding.h> #include <vcg/math/histogram.h> #include <vcg/complex/algorithms/clean.h> #include <wrap/io_trimesh/import.h> #include <wrap/io_trimesh/export_ply.h> #include <wrap/ply/plystuff.h> #include <vcg/complex/algorithms/create/marching_cubes.h> #include <vcg/complex/algorithms/create/extended_marching_cubes.h> #include "trivial_walker.h" // local optimization #include <vcg/complex/algorithms/local_optimization.h> #include <vcg/complex/algorithms/local_optimization/tri_edge_collapse.h> #include <vcg/complex/algorithms/local_optimization/tri_edge_collapse_quadric.h> #include <vcg/simplex/edge/base.h> #include <stdarg.h> #include "volume.h" #include "tri_edge_collapse_mc.h" #include <osg/Timer> typedef bool CallBackPosTotal(const int pos, const int total,unsigned long long tick,const char * str ); namespace vcg { namespace tri { // Simple prototype for later use... 
template<class MeshType> void MCSimplify( MeshType &m, float perc, bool preserveBB=true, vcg::CallBackPos *cb=0); template < class SMesh, class MeshProvider> class PlyMC { public: class MCVertex; class MCEdge; class MCFace; class MCUsedTypes: public vcg::UsedTypes <vcg::Use<MCVertex>::template AsVertexType, vcg::Use<MCEdge >::template AsEdgeType, vcg::Use<MCFace >::template AsFaceType >{}; class MCVertex : public Vertex< MCUsedTypes, vertex::Coord3f, vertex::Color4b, vertex::Mark, vertex::VFAdj, vertex::BitFlags, vertex::Qualityf>{}; class MCEdge : public Edge<MCUsedTypes,edge::VertexRef> { public: inline MCEdge() {}; inline MCEdge( MCVertex * v0, MCVertex * v1){this->V(0) = v0; this->V(1) = v1; }; static inline MCEdge OrderedEdge(MCVertex* v0,MCVertex* v1){ if(v0<v1) return MCEdge(v0,v1); else return MCEdge(v1,v0); } }; class MCFace : public Face< MCUsedTypes, face::InfoOcf, face::VertexRef, face::FFAdjOcf, face::VFAdjOcf, face::BitFlags> {}; class MCMesh : public vcg::tri::TriMesh< std::vector< MCVertex>, face::vector_ocf< MCFace > > {}; //****************************************** //typedef Voxel<float> Voxelf; typedef Voxelfc Voxelf; //****************************************** class Parameter { public: Parameter() { NCell=10000; WideNum= 3; WideSize=0; VoxSize=0; IPosS=Point3i(0,0,0); // SubVolume Start IPosE=Point3i(0,0,0); // SubVolume End IPosB=Point3i(0,0,0); // SubVolume to restart from in lexicographic order (useful for crashes) //IPos=Point3i(0,0,0); IDiv=Point3i(1,1,1); VerboseLevel=0; SliceNum=1; FillThr=12; ExpAngleDeg=30; SmoothNum=1; RefillNum=1; IntraSmoothFlag = false; QualitySmoothAbs = 0.0f; // 0 means un-setted value. 
QualitySmoothVox = 3.0f; // expressed in voxel OffsetFlag=false; OffsetThr=-3; GeodesicQualityFlag=true; PLYFileQualityFlag=false; SaveVolumeFlag=false; SafeBorder=1; CleaningFlag=false; SimplificationFlag=false; VertSplatFlag=false; MergeColor=false; basename = "plymcout"; } int NCell; int WideNum; float WideSize; float VoxSize; Point3i IPosS; // SubVolume Start Point3i IPosE; // SubVolume End Point3i IPosB; // SubVolume to restart from in lexicographic order (useful for crashes) //Point3i IPos; Point3i IDiv; int VerboseLevel; int SliceNum; int FillThr; float ExpAngleDeg; int SmoothNum; int RefillNum; bool IntraSmoothFlag; float QualitySmoothAbs; // 0 means un-setted value. float QualitySmoothVox; // expressed in voxel bool OffsetFlag; float OffsetThr; bool GeodesicQualityFlag; bool PLYFileQualityFlag; bool SaveVolumeFlag; int SafeBorder; bool CleaningFlag; bool SimplificationFlag; bool VertSplatFlag; bool MergeColor; std::string basename; std::vector<std::string> OutNameVec; std::vector<std::string> OutNameSimpVec; }; //end Parameter class /// PLYMC Data MeshProvider MP; Parameter p; std::vector< std::vector<std::vector<Volume<Voxelf> > > >vVV; /// PLYMC Methods bool InitMesh(Volume<Voxelf> &VV,SMesh &m, const char *filename, Matrix44f Tr) { typename SMesh::VertexIterator vi; int loadmask; int ret = tri::io::Importer<SMesh>::Open(m,filename,loadmask); tri::Clean<SMesh>::FlipMesh(m); if(ret) { printf("Error: unabe to open mesh '%s'",filename); return false; } if(p.VertSplatFlag) { if(!(loadmask & tri::io::Mask::IOM_VERTNORMAL)) { printf("Error, pointset MUST have normals"); exit(-1); } else printf("Ok Pointset has normals\n"); for(vi=m.vert.begin(); vi!=m.vert.end();++vi) if(math::Abs(SquaredNorm((*vi).N())-1.0)>0.0001) { printf("Error: mesh has not per vertex normalized normals\n"); return false; } if(!(loadmask & tri::io::Mask::IOM_VERTQUALITY)) tri::UpdateQuality<SMesh>::VertexConstant(m,0); tri::UpdateNormals<SMesh>::PerVertexMatrix(m,Tr); //if(!(loadmask & 
tri::io::Mask::IOM_VERTCOLOR)) // saveMask &= ~tri::io::Mask::IOM_VERTCOLOR; } else // processing for triangle meshes { if(p.CleaningFlag){ int dup = tri::Clean<SMesh>::RemoveDuplicateVertex(m); int unref = tri::Clean<SMesh>::RemoveUnreferencedVertex(m); printf("Removed %i duplicates and %i unref",dup,unref); } tri::UpdateNormals<SMesh>::PerVertexNormalizedPerFaceNormalized(m); if(p.GeodesicQualityFlag) { tri::UpdateTopology<SMesh>::VertexFace(m); tri::UpdateFlags<SMesh>::FaceBorderFromVF(m); tri::UpdateQuality<SMesh>::VertexGeodesicFromBorder(m); } } tri::UpdatePosition<SMesh>::Matrix(m,Tr,false); tri::UpdateBounding<SMesh>::Box(m); //printf("Init Mesh %s (%ivn,%ifn)\n",filename,m.vn,m.fn); for(vi=m.vert.begin(); vi!=m.vert.end();++vi) VV.Interize((*vi).P()); return true; } // This function add a mesh (or a point cloud to the volume) // the point cloud MUST have normalized vertex normals. bool AddMeshToVolumeM(Volume<Voxelf> &VV,SMesh &m, std::string meshname, const double w ) { typename SMesh::VertexIterator vi; typename SMesh::FaceIterator fi; if(!m.bbox.Collide(VV.SubBoxSafe)) return false; size_t found =meshname.find_last_of("/\\"); std::string shortname = meshname.substr(found+1); Volume <Voxelf> B; B.Init(VV); bool res=false; double quality=0; // Now add the mesh to the volume if(!p.VertSplatFlag) { float minq=std::numeric_limits<float>::max(), maxq=-std::numeric_limits<float>::max(); // Calcolo range qualita geodesica PER FACCIA come media di quelle per vertice for(fi=m.face.begin(); fi!=m.face.end();++fi){ (*fi).Q()=((*fi).V(0)->Q()+(*fi).V(1)->Q()+(*fi).V(2)->Q())/3.0f; minq=std::min((*fi).Q(),minq); maxq=std::max((*fi).Q(),maxq); } // La qualita' e' inizialmente espressa come distanza assoluta dal bordo della mesh //printf("Q [%4.2f %4.2f] \n",minq,maxq); bool closed=false; if(minq==maxq) closed=true; // se la mesh e' chiusa la ComputeGeodesicQuality mette la qualita a zero ovunque // Classical approach: scan each face int tt0=clock(); //printf("---- 
Face Rasterization"); for(fi=m.face.begin(); fi!=m.face.end();++fi) { if(closed || (p.PLYFileQualityFlag==false && p.GeodesicQualityFlag==false)) quality=1.0; else quality=w*(*fi).Q(); if(quality) res |= B.ScanFace((*fi).V(0)->P(),(*fi).V(1)->P(),(*fi).V(2)->P(),quality,(*fi).N()); } // printf(" : %li\n",clock()-tt0); } else { // Splat approach add only the vertices to the volume printf("Vertex Splatting\n"); for(vi=m.vert.begin();vi!=m.vert.end();++vi) { if(p.PLYFileQualityFlag==false) quality=1.0; else quality=w*(*vi).Q(); if(quality) res |= B.SplatVert((*vi).P(),quality,(*vi).N(),(*vi).C()); } } if(!res) return false; int vstp=0; if(p.VerboseLevel>0) { B.SlicedPPM(shortname.c_str(),std::string(SFormat("%02i",vstp)).c_str(),p.SliceNum ); B.SlicedPPMQ(shortname.c_str(),std::string(SFormat("%02i",vstp)).c_str(),p.SliceNum ); vstp++; } for(int i=0;i<p.WideNum;++i) { B.Expand(math::ToRad(p.ExpAngleDeg)); if(p.VerboseLevel>1) B.SlicedPPM(shortname.c_str(),SFormat("%02ie",vstp++),p.SliceNum ); B.Refill(p.FillThr); if(p.VerboseLevel>1) B.SlicedPPM(shortname.c_str(),SFormat("%02if",vstp++),p.SliceNum ); if(p.IntraSmoothFlag) { Volume <Voxelf> SM; SM.Init(VV); SM.CopySmooth(B,1,p.QualitySmoothAbs); B=SM; if(p.VerboseLevel>1) B.SlicedPPM(shortname.c_str(),SFormat("%02is",vstp++),p.SliceNum ); // if(VerboseLevel>1) B.SlicedPPMQ(shortname,SFormat("%02is",vstp),SliceNum ); } } if(p.SmoothNum>0) { Volume <Voxelf> SM; SM.Init(VV); SM.CopySmooth(B,1,p.QualitySmoothAbs); B=SM; if(p.VerboseLevel>1) B.SlicedPPM(shortname.c_str(),SFormat("%02isf",vstp++),p.SliceNum ); } VV.Merge(B); if(p.VerboseLevel>0) VV.SlicedPPMQ(std::string("merge_").c_str(),shortname.c_str(),p.SliceNum ); return true; } void Process(CallBackPosTotal *cb=0) { unsigned long long startTick=osg::Timer::instance()->tick(); printf("bbox scanning...\n"); fflush(stdout); Matrix44f Id; Id.SetIdentity(); MP.InitBBox(); printf("Completed BBox Scanning \n"); Box3f fullb = MP.fullBB(); assert (!fullb.IsNull()); assert 
(!fullb.IsEmpty()); // Calcolo gridsize Point3i gridsize; Point3f voxdim; fullb.Offset(fullb.Diag() * 0.1 ); int saveMask=0; if(p.MergeColor) saveMask |= tri::io::Mask::IOM_VERTCOLOR ; voxdim = fullb.max - fullb.min; int TotAdd=0,TotMC=0,TotSav=0; // if kcell==0 the number of cells is computed starting from required voxel size; __int64 cells; if(p.NCell>0) cells = (__int64)(p.NCell)*(__int64)(1000); else cells = (__int64)(voxdim[0]/p.VoxSize) * (__int64)(voxdim[1]/p.VoxSize) *(__int64)(voxdim[2]/p.VoxSize) ; Box3i globalBox; { Volume<Voxelf> B; // local to this small block Box3f fullbf; fullbf.Import(fullb); B.Init(cells,fullbf,p.IDiv,p.IPosS); B.Dump(stdout); if(p.WideSize>0) p.WideNum=p.WideSize/B.voxel.Norm(); globalBox=B.SubPart; // Now the volume has been determined; the quality threshold in absolute units can be computed if(p.QualitySmoothAbs==0) p.QualitySmoothAbs= p.QualitySmoothVox * B.voxel.Norm(); } bool res=false; vVV.resize(p.IDiv[0]); for(int i=0; i<vVV.size(); i++){ vVV[i].resize(p.IDiv[1]); for(int j=0; j<vVV[i].size(); j++) vVV[i][j].resize(p.IDiv[2]); } //#pragma omp parallel for for(int xx=p.IPosS[0];xx<=p.IPosE[0];++xx) for(int yy=p.IPosS[1];yy<=p.IPosE[1];++yy) for(int zz=p.IPosS[2];zz<=p.IPosE[2];++zz) if((zz+(yy*p.IDiv[2])+(xx*p.IDiv[2]*p.IDiv[1])) >= (p.IPosB[2]+(p.IPosB[1]*p.IDiv[2])+(p.IPosB[0]*p.IDiv[2]*p.IDiv[1]))) // skip until IPos >= IPosB { printf("----------- SubBlock %2i %2i %2i ----------\n",xx,yy,zz); //Volume<Voxelf> B; Volume<Voxelf> &VV =vVV[xx][yy][zz]; int t0=clock(); Box3f fullbf; fullbf.Import(fullb); //VV.DeltaVoxelSafe=1; Point3i IPos; IPos[0]=xx; IPos[1]=yy; IPos[2]=zz; VV.Init(cells,fullbf,p.IDiv,IPos); printf("\n\n --------------- Allocated subcells. 
%i\n",VV.Allocated()); std::string filename=p.basename; if(p.IDiv!=Point3i(1,1,1)) { std::string subvoltag; VV.GetSubVolumeTag(subvoltag); filename+=subvoltag; } /********** Grande loop di scansione di tutte le mesh *********/ for(int i=0;i<MP.size();++i) { Box3f bbb= MP.bb(i); /**********************/ cb((i+1),MP.size(),startTick,"Vol"); /**********************/ // if bbox of mesh #i is part of the subblock, then process it #pragma omp critical if(bbb.Collide(VV.SubBoxSafe)) { SMesh *sm; if(!MP.Find(i,sm) ) { res = InitMesh(VV,*sm,MP.MeshName(i).c_str(),MP.Tr(i)); if(!res) { printf("Failed Init of mesh %s",MP.MeshName(i).c_str()); //break; } } res |= AddMeshToVolumeM(VV,*sm, MP.MeshName(i),MP.W(i)); } } //B.Normalize(1); printf("End Scanning\n"); if(p.OffsetFlag) { VV.Offset(p.OffsetThr); if (p.VerboseLevel>0) { VV.SlicedPPM("finaloff","__",p.SliceNum); VV.SlicedPPMQ("finaloff","__",p.SliceNum); } } //if(p.VerboseLevel>1) VV.SlicedPPM(filename.c_str(),SFormat("_%02im",i),p.SliceNum ); for(int i=0;i<p.RefillNum;++i) { //VV.Refill(3,6); if(p.VerboseLevel>1) VV.SlicedPPM(filename.c_str(),SFormat("_%02imsr",i),p.SliceNum ); //if(VerboseLevel>1) VV.SlicedPPMQ(filename,SFormat("_%02ips",i++),SliceNum ); } for(int i=0;i<p.SmoothNum;++i) { Volume <Voxelf> SM; SM.Init(VV); printf("%2i/%2i: ",i,p.SmoothNum); SM.CopySmooth(VV,1,p.QualitySmoothAbs); VV=SM; // VV.Refill(3,6); if(p.VerboseLevel>1) VV.SlicedPPM(filename.c_str(),SFormat("_%02ims",i),p.SliceNum ); } int t1=clock(); //-------- TotAdd+=t1-t0; printf("Extracting surface...\r"); if (p.VerboseLevel>0) { VV.SlicedPPM("final","__",p.SliceNum); VV.SlicedPPMQ("final","__",p.SliceNum); } //VV.Write("test",0,0); //MCMesh me; // } int cnt=0; for(int xx=p.IPosS[0];xx<=p.IPosE[0];++xx) for(int yy=p.IPosS[1];yy<=p.IPosE[1];++yy) for(int zz=p.IPosS[2];zz<=p.IPosE[2];++zz) if((zz+(yy*p.IDiv[2])+(xx*p.IDiv[2]*p.IDiv[1])) >= (p.IPosB[2]+(p.IPosB[1]*p.IDiv[2])+(p.IPosB[0]*p.IDiv[2]*p.IDiv[1]))) // skip until IPos >= IPosB { 
Volume<Voxelf> &VV =vVV[xx][yy][zz]; Box3i ibox; for(int xxx=p.IPosS[0];xxx<=p.IPosE[0];++xxx) for(int yyy=p.IPosS[1];yyy<=p.IPosE[1];++yyy){ if(xxx==xx && yyy==yy) continue; Box3i SubPartSafe=vVV[xxx][yyy][zz].SubPartSafe; if(!SubPartSafe.Collide(VV.SubPartSafe)) continue; ibox.min[0] = std::max(SubPartSafe.min[0],VV.SubPartSafe.min[0]); ibox.min[1] = std::max(SubPartSafe.min[1],VV.SubPartSafe.min[1]); ibox.min[2] = std::max(SubPartSafe.min[2],VV.SubPartSafe.min[2]); ibox.max[0] = std::min(SubPartSafe.max[0],VV.SubPartSafe.max[0]); ibox.max[1] = std::min(SubPartSafe.max[1],VV.SubPartSafe.max[1]); ibox.max[2] = std::min(SubPartSafe.max[2],VV.SubPartSafe.max[2]); // ibox=globalBox; /* printf("%d %d %d -- %d %d %d\n",ibox.min[0],ibox.min[1],ibox.min[2], ibox.max[0],ibox.max[1],ibox.max[2]); printf("A %d %d %d -- %d %d %d\n",SubPartSafe.min[0],SubPartSafe.min[1],SubPartSafe.min[2], SubPartSafe.max[0],SubPartSafe.max[1],SubPartSafe.max[2]); printf("B %d %d %d -- %d %d %d\n",VV.SubPartSafe.min[0],VV.SubPartSafe.min[1],VV.SubPartSafe.min[2], VV.SubPartSafe.max[0],VV.SubPartSafe.max[1],VV.SubPartSafe.max[2]);*/ for(int xxxx=ibox.min[0];xxxx<=ibox.max[0];++xxxx) for(int yyyy=ibox.min[1];yyyy<=ibox.max[1];++yyyy) for(int zzzz=ibox.min[2];zzzz<=ibox.max[2];++zzzz){ // printf("%d %d %d\n",xxxx,yyyy,zzzz); if(vVV[xxx][yyy][zz].Val(xxxx,yyyy,zzzz)!=VV.Val(xxxx,yyyy,zzzz)){ // printf("%f %f\n",vVV[xxx][yyy][zz].Val(xxxx,yyyy,zzzz),VV.Val(xxxx,yyyy,zzzz)); if(vVV[xxx][yyy][zz].Val(xxxx,yyyy,zzzz) == 1000.000) vVV[xxx][yyy][zz].V(xxxx,yyyy,zzzz).Set(VV.V(xxxx,yyyy,zzzz)); else if(VV.Val(xxxx,yyyy,zzzz) == 1000.000) VV.V(xxxx,yyyy,zzzz).Set(vVV[xxx][yyy][zz].V(xxxx,yyyy,zzzz)); else{ vVV[xxx][yyy][zz].V(xxxx,yyyy,zzzz).Blend(VV.V(xxxx,yyyy,zzzz),0.5); VV.V(xxxx,yyyy,zzzz).Set( vVV[xxx][yyy][zz].V(xxxx,yyyy,zzzz)); } } // vVV[xxx][yyy][zz].V(xxxx,yyyy,zzzz).Set(VV.V(xxxx,yyyy,zzzz)); // VV.V(xxxx,yyyy,zzzz).SetB(false); } } /* std::string filename="final"; if(p.IDiv!=Point3i(1,1,1)) 
{ std::string subvoltag; VV.GetSubVolumeTag(subvoltag); filename+=subvoltag; } VV.SlicedPPM(filename.c_str(),"__",1); VV.SlicedPPMQ(filename.c_str(),"__",1); VV.Dump(stdout);*/ printf("----------- Equalizing corner SubBlock %2i %2i %2i ----------\n",xx,yy,zz); //Volume<Voxelf> B; } //#pragma omp parallel for for(int xx=p.IPosS[0];xx<=p.IPosE[0];++xx) for(int yy=p.IPosS[1];yy<=p.IPosE[1];++yy) for(int zz=p.IPosS[2];zz<=p.IPosE[2];++zz) if((zz+(yy*p.IDiv[2])+(xx*p.IDiv[2]*p.IDiv[1])) >= (p.IPosB[2]+(p.IPosB[1]*p.IDiv[2])+(p.IPosB[0]*p.IDiv[2]*p.IDiv[1]))) // skip until IPos >= IPosB { Volume<Voxelf> &VV =vVV[xx][yy][zz]; std::string filename=p.basename; // if(p.IDiv!=Point3i(1,1,1)) { std::string subvoltag; VV.GetSubVolumeTag(subvoltag); filename+=subvoltag; } MCMesh me; if(res) { typedef vcg::tri::TrivialWalker<MCMesh, Volume <Voxelf> > Walker; typedef vcg::tri::MarchingCubes<MCMesh, Walker> MarchingCubes; //typedef vcg::tri::ExtendedMarchingCubes<MCMesh, Walker> ExtendedMarchingCubes; Walker walker; MarchingCubes mc(me, walker); Box3i currentSubBox=VV.SubPartSafe; Point3i currentSubBoxRes=VV.ssz; /**********************/ cb(50,50,0,"Step 2: Marching Cube..."); /**********************/ walker.BuildMesh(me,VV,mc,currentSubBox,currentSubBoxRes); typename MCMesh::VertexIterator vi; Box3f bbb; bbb.Import(VV.SubPart); for(vi=me.vert.begin();vi!=me.vert.end();++vi) { if(!bbb.IsIn((*vi).P())) vcg::tri::Allocator< MCMesh >::DeleteVertex(me,*vi); VV.DeInterize((*vi).P()); } typename MCMesh::FaceIterator fi; for (fi = me.face.begin(); fi != me.face.end(); ++fi) { if((*fi).V(0)->IsD() || (*fi).V(1)->IsD() || (*fi).V(2)->IsD() ) vcg::tri::Allocator< MCMesh >::DeleteFace(me,*fi); else std::swap((*fi).V1(0), (*fi).V2(0)); } int t2=clock(); //-------- // TotMC+=t2-t1; if(me.vn >0 || me.fn >0) { p.OutNameVec.push_back(filename+std::string(".ply")); tri::io::ExporterPLY<MCMesh>::Save(me,p.OutNameVec.back().c_str(),saveMask); if(p.SimplificationFlag) { MCMesh me2; 
tri::io::ImporterPLY<MCMesh>::Open(me2,p.OutNameVec.back().c_str(),saveMask); /**********************/ cb(50,50,0,"Step 3: Simplify mesh..."); /**********************/ p.OutNameSimpVec.push_back(filename+std::string(".d.ply")); me2.face.EnableVFAdjacency(); me2.face.EnableFFAdjacency(); vcg::tri::UpdateTopology<MCMesh>::VertexFace(me2); vcg::tri::UpdateTopology<MCMesh>::FaceFace(me2); tri::Clean<MCMesh>::RemoveNonManifoldVertex(me2); tri::Clean<MCMesh>::RemoveNonManifoldFace(me2); tri::Clean<MCMesh>::RemoveTVertexByFlip(me2,20,true); tri::Clean<MCMesh>::RemoveFaceFoldByFlip(me2); MCSimplify<MCMesh>(me2, VV.voxel[0]/4.0); tri::Allocator<MCMesh>::CompactFaceVector(me2); // me2.face.EnableFFAdjacency(); tri::Clean<MCMesh>::RemoveTVertexByFlip(me2,20,true); tri::Clean<MCMesh>::RemoveFaceFoldByFlip(me2); tri::io::ExporterPLY<MCMesh>::Save(me2,p.OutNameSimpVec.back().c_str(),saveMask); } } int t3=clock(); //-------- TotSav+=t3-t2; } printf("Mesh Saved '%s': %8d vertices, %8d faces \n",(filename+std::string(".ply")).c_str(),me.vn,me.fn); printf("Adding Meshes %8i\n",TotAdd); printf("MC %8i\n",TotMC); printf("Saving %8i\n",TotSav); printf("Total %8i\n",TotAdd+TotMC+TotSav); } else { printf("----------- skipping SubBlock %2i %2i %2i ----------\n",xx,yy,zz); } } }; //end PlyMC class template < class MeshType> class PlyMCTriEdgeCollapse: public MCTriEdgeCollapse< MeshType, PlyMCTriEdgeCollapse<MeshType> > { public: typedef MCTriEdgeCollapse< MeshType, PlyMCTriEdgeCollapse > MCTEC; typedef typename MeshType::VertexType::EdgeType EdgeType; inline PlyMCTriEdgeCollapse( const EdgeType &p, int i) :MCTEC(p,i){} }; template< class MeshType> void MCSimplify( MeshType &m, float absoluteError, bool preserveBB, vcg::CallBackPos *cb) { typedef PlyMCTriEdgeCollapse<MeshType> MyColl; tri::UpdateBounding<MeshType>::Box(m); tri::UpdateTopology<MeshType>::VertexFace(m); vcg::LocalOptimization<MeshType> DeciSession(m); MyColl::bb()=m.bbox; MyColl::preserveBBox()=preserveBB; 
if(absoluteError==0) { // guess the mc side. // In a MC mesh the vertices are on the egdes of the cells. and the edges are (mostly) on face of the cells. // If you have 2 vert over the same face xy they share z std::vector<float> ZSet; typename MeshType::FaceIterator fi; for(fi = m.face.begin();fi!=m.face.end();++fi) if(!(*fi).IsD()) { Point3f v0=(*fi).V(0)->P(); Point3f v1=(*fi).V(1)->P(); Point3f v2=(*fi).V(2)->P(); if(v0[2]==v1[2] && v0[1]!=v1[1] && v0[0]!=v1[0]) ZSet.push_back(v0[2]); if(v0[2]==v2[2] && v0[1]!=v1[1] && v2[0]!=v2[0]) ZSet.push_back(v0[2]); if(v1[2]==v2[2] && v1[1]!=v1[1] && v2[0]!=v2[0]) ZSet.push_back(v0[2]); if(ZSet.size()>100) break; } std::sort(ZSet.begin(),ZSet.end()); std::vector<float>::iterator lastV = std::unique(ZSet.begin(),ZSet.end()); ZSet.resize(lastV-ZSet.begin()); float Delta=0; for(size_t i = 0; i< ZSet.size()-1;++i) { Delta = std::max(ZSet[i+1]-ZSet[i],Delta); //qDebug("%f",Delta); } absoluteError= Delta/4.0f; } //qDebug("Simplifying at absoluteError=%f",absoluteError); float TargetError = absoluteError; char buf[1024]; DeciSession.template Init< MyColl > (); MyColl::areaThr()=TargetError*TargetError; DeciSession.SetTimeBudget(1.0f); if(TargetError < std::numeric_limits<float>::max() ) DeciSession.SetTargetMetric(TargetError); while(DeciSession.DoOptimization() && DeciSession.currMetric < TargetError) { sprintf(buf,"Simplyfing %7i err %9g \r",m.fn,DeciSession.currMetric); if (cb) cb(int(100.0f*DeciSession.currMetric/TargetError),buf); } } } // end namespace tri } // end namespace vcg #endif
wbb3_fmt_plug.c
/* WoltLab Burning Board 3 (WBB3) cracker patch for JtR. Hacked together during * May of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>. * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, * are permitted. * * Input Format => user:$wbb3$*type*hash * * Where, * * type => 1, for sha1($salt.sha1($salt.sha1($pass))) hashing scheme * * JimF, July 2012. * Made small change in hex_encode 10x improvement in speed. Also some other * changes. Should be a thin dyanamic. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_wbb3; #elif FMT_REGISTERS_H john_register_one(&fmt_wbb3); #else #include "arch.h" #include "sha.h" #include <string.h> #include <assert.h> #include <errno.h> #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 8 // tuned on core i7 #endif #endif #include "memdbg.h" #define FORMAT_LABEL "wbb3" #define FORMAT_NAME "WoltLab BB3" #define FORMAT_TAG "$wbb3$*" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "SHA1 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 32 #define BINARY_SIZE 20 #define MAX_SALT_LEN 40 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN sizeof(uint32_t) #define SALT_ALIGN sizeof(int) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 64 static struct fmt_tests wbb3_tests[] = { {"$wbb3$*1*0b053db07dc02bc6f6e24e00462f17e3c550afa9*e2063f7c629d852302d3020599376016ff340399", "123456"}, {"$wbb3$*1*0b053db07dc02bc6f6e24e00462f17e3c550afa9*f6975cc560c5d03feb702158d08f90bf2fa773d6", "password"}, {"$wbb3$*1*a710463f75bf4568d398db32a53f9803007388a3*2c56d23b44eb122bb176dfa2a1452afaf89f1143", "123456"}, 
{"$wbb3$*1*1039145e9e785ddb2ac7ccca89ac1b159b595cc1*2596b5f8e7cdaf4b15604ad336b810e8e2935b1d", "12345678"}, {"$wbb3$*1*db763342e23f8ccdbd9c90d1cc7896d80b7e0a44*26496a87c1a7dd68f7beceb2fc40b6fc4223a453", "12345678"}, {"$wbb3$*1*bf2c7d0c8fb6cb146adf8933e32da012d31b5bbb*d945c02cf85738b7db4f4f05edd676283280a513", "123456789"}, {"$wbb3$*1*d132b22d3f1d942b99cc1f5fbd5cc3eb0824d608*e3e03fe02223c5030e834f81997f614b43441853", "1234567890"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; static unsigned char (*hexhash1)[40]; static int dirty; static struct custom_salt { int type; unsigned char salt[MAX_SALT_LEN+1]; } *cur_salt; inline static void hex_encode(unsigned char *str, int len, unsigned char *out) { int i; for (i = 0; i < len; ++i) { out[0] = itoa16[str[i]>>4]; out[1] = itoa16[str[i]&0xF]; out += 2; } } static void init(struct fmt_main *self) { #ifdef _OPENMP static int omp_t = 1; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); hexhash1 = mem_calloc(self->params.max_keys_per_crypt, sizeof(*hexhash1)); } static void done(void) { MEM_FREE(hexhash1); MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char _ctcopy[256], *ctcopy = _ctcopy; char *p; int res, extra; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) return 0; strnzcpy(ctcopy, ciphertext, 255); ctcopy += FORMAT_TAG_LEN; p = strtokm(ctcopy, "*"); /* type */ if (!p) goto err; if (!isdec(p)) goto err; res = atoi(p); if (res != 1) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* salt */ goto err; res = strlen(p); if (res > MAX_SALT_LEN || !ishexlc_oddOK(p)) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* hash */ goto err; if (hexlenl(p, 
&extra) != BINARY_SIZE * 2 || extra) goto err; return 1; err: return 0; } static void *get_salt(char *ciphertext) { static struct custom_salt cs; char _ctcopy[256], *ctcopy = _ctcopy; char *p; memset(&cs, 0, sizeof(cs)); strnzcpy(ctcopy, ciphertext, 255); ctcopy += FORMAT_TAG_LEN; /* skip over "$wbb3$*" */ p = strtokm(ctcopy, "*"); cs.type = atoi(p); p = strtokm(NULL, "*"); strcpy((char *)cs.salt, p); return (void *)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE+1]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '*') + 1; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { unsigned char hexhash[40]; SHA_CTX ctx; if (dirty) { unsigned char out[20]; SHA1_Init(&ctx); SHA1_Update(&ctx, saved_key[index], strlen(saved_key[index])); SHA1_Final(out, &ctx); hex_encode(out, 20, hexhash1[index]); } SHA1_Init(&ctx); SHA1_Update(&ctx, cur_salt->salt, 40); SHA1_Update(&ctx, hexhash1[index], 40); SHA1_Final((unsigned char*)crypt_out[index], &ctx); hex_encode((unsigned char*)crypt_out[index], 20, hexhash); SHA1_Init(&ctx); 
SHA1_Update(&ctx, cur_salt->salt, 40); SHA1_Update(&ctx, hexhash, 40); SHA1_Final((unsigned char*)crypt_out[index], &ctx); } dirty = 0; return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (*((uint32_t*)binary) == crypt_out[index][0]) return 1; return 0; } static int cmp_one(void *binary, int index) { return *((uint32_t*)binary) == crypt_out[index][0]; } static int cmp_exact(char *source, int index) { void *binary = get_binary(source); return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static void wbb3_set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; dirty = 1; } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_wbb3 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, { FORMAT_TAG }, wbb3_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, wbb3_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
struct_matrix.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision: 2.43 $ ***********************************************************************EHEADER*/ /****************************************************************************** * * Member functions for hypre_StructMatrix class. * *****************************************************************************/ #include "_hypre_struct_mv.h" /*-------------------------------------------------------------------------- * hypre_StructMatrixExtractPointerByIndex * Returns pointer to data for stencil entry coresponding to * `index' in `matrix'. If the index does not exist in the matrix's * stencil, the NULL pointer is returned. 
*--------------------------------------------------------------------------*/ double * hypre_StructMatrixExtractPointerByIndex( hypre_StructMatrix *matrix, HYPRE_Int b, hypre_Index index ) { hypre_StructStencil *stencil; HYPRE_Int rank; stencil = hypre_StructMatrixStencil(matrix); rank = hypre_StructStencilElementRank( stencil, index ); if ( rank >= 0 ) return hypre_StructMatrixBoxData(matrix, b, rank); else return NULL; /* error - invalid index */ } /*-------------------------------------------------------------------------- * hypre_StructMatrixCreate *--------------------------------------------------------------------------*/ hypre_StructMatrix * hypre_StructMatrixCreate( MPI_Comm comm, hypre_StructGrid *grid, hypre_StructStencil *user_stencil ) { hypre_StructMatrix *matrix; HYPRE_Int i; matrix = hypre_CTAlloc(hypre_StructMatrix, 1); hypre_StructMatrixComm(matrix) = comm; hypre_StructGridRef(grid, &hypre_StructMatrixGrid(matrix)); hypre_StructMatrixUserStencil(matrix) = hypre_StructStencilRef(user_stencil); hypre_StructMatrixDataAlloced(matrix) = 1; hypre_StructMatrixRefCount(matrix) = 1; /* set defaults */ hypre_StructMatrixSymmetric(matrix) = 0; hypre_StructMatrixConstantCoefficient(matrix) = 0; for (i = 0; i < 6; i++) { hypre_StructMatrixNumGhost(matrix)[i] = hypre_StructGridNumGhost(grid)[i]; } return matrix; } /*-------------------------------------------------------------------------- * hypre_StructMatrixRef *--------------------------------------------------------------------------*/ hypre_StructMatrix * hypre_StructMatrixRef( hypre_StructMatrix *matrix ) { hypre_StructMatrixRefCount(matrix) ++; return matrix; } /*-------------------------------------------------------------------------- * hypre_StructMatrixDestroy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_StructMatrixDestroy( hypre_StructMatrix *matrix ) { HYPRE_Int i; if (matrix) { hypre_StructMatrixRefCount(matrix) --; if 
(hypre_StructMatrixRefCount(matrix) == 0) { if (hypre_StructMatrixDataAlloced(matrix)) { hypre_SharedTFree(hypre_StructMatrixData(matrix)); } hypre_CommPkgDestroy(hypre_StructMatrixCommPkg(matrix)); hypre_ForBoxI(i, hypre_StructMatrixDataSpace(matrix)) hypre_TFree(hypre_StructMatrixDataIndices(matrix)[i]); hypre_TFree(hypre_StructMatrixDataIndices(matrix)); hypre_BoxArrayDestroy(hypre_StructMatrixDataSpace(matrix)); hypre_TFree(hypre_StructMatrixSymmElements(matrix)); hypre_StructStencilDestroy(hypre_StructMatrixUserStencil(matrix)); hypre_StructStencilDestroy(hypre_StructMatrixStencil(matrix)); hypre_StructGridDestroy(hypre_StructMatrixGrid(matrix)); hypre_TFree(matrix); } } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_StructMatrixInitializeShell *--------------------------------------------------------------------------*/ HYPRE_Int hypre_StructMatrixInitializeShell( hypre_StructMatrix *matrix ) { hypre_StructGrid *grid; HYPRE_Int ndim; hypre_StructStencil *user_stencil; hypre_StructStencil *stencil; hypre_Index *stencil_shape; HYPRE_Int stencil_size; HYPRE_Int num_values; HYPRE_Int *symm_elements; HYPRE_Int constant_coefficient; HYPRE_Int *num_ghost; HYPRE_Int extra_ghost[] = {0, 0, 0, 0, 0, 0}; hypre_BoxArray *data_space; hypre_BoxArray *boxes; hypre_Box *box; hypre_Box *data_box; HYPRE_Int **data_indices; HYPRE_Int data_size; HYPRE_Int data_box_volume; HYPRE_Int i, j, d; grid = hypre_StructMatrixGrid(matrix); ndim = hypre_StructMatrixDim(matrix); /*----------------------------------------------------------------------- * Set up stencil and num_values: * * If the matrix is symmetric, then the stencil is a "symmetrized" * version of the user's stencil. If the matrix is not symmetric, * then the stencil is the same as the user's stencil. 
* * The `symm_elements' array is used to determine what data is * explicitely stored (symm_elements[i] < 0) and what data does is * not explicitely stored (symm_elements[i] >= 0), but is instead * stored as the transpose coefficient at a neighboring grid point. *-----------------------------------------------------------------------*/ if (hypre_StructMatrixStencil(matrix) == NULL) { user_stencil = hypre_StructMatrixUserStencil(matrix); if (hypre_StructMatrixSymmetric(matrix)) { /* store only symmetric stencil entry data */ hypre_StructStencilSymmetrize(user_stencil, &stencil, &symm_elements); num_values = ( hypre_StructStencilSize(stencil) + 1 ) / 2; } else { /* store all stencil entry data */ stencil = hypre_StructStencilRef(user_stencil); num_values = hypre_StructStencilSize(stencil); symm_elements = hypre_TAlloc(HYPRE_Int, num_values); for (i = 0; i < num_values; i++) { symm_elements[i] = -1; } } hypre_StructMatrixStencil(matrix) = stencil; hypre_StructMatrixSymmElements(matrix) = symm_elements; hypre_StructMatrixNumValues(matrix) = num_values; } /*----------------------------------------------------------------------- * Set ghost-layer size for symmetric storage * - All stencil coeffs are to be available at each point in the * grid, as well as in the user-specified ghost layer. 
*-----------------------------------------------------------------------*/ num_ghost = hypre_StructMatrixNumGhost(matrix); stencil = hypre_StructMatrixStencil(matrix); stencil_shape = hypre_StructStencilShape(stencil); stencil_size = hypre_StructStencilSize(stencil); symm_elements = hypre_StructMatrixSymmElements(matrix); for (i = 0; i < stencil_size; i++) { if (symm_elements[i] >= 0) { for (d = 0; d < ndim; d++) { extra_ghost[2*d] = hypre_max(extra_ghost[2*d], -hypre_IndexD(stencil_shape[i], d)); extra_ghost[2*d + 1] = hypre_max(extra_ghost[2*d + 1], hypre_IndexD(stencil_shape[i], d)); } } } for (d = 0; d < ndim; d++) { num_ghost[2*d] += extra_ghost[2*d]; num_ghost[2*d + 1] += extra_ghost[2*d + 1]; } /*----------------------------------------------------------------------- * Set up data_space *-----------------------------------------------------------------------*/ if (hypre_StructMatrixDataSpace(matrix) == NULL) { boxes = hypre_StructGridBoxes(grid); data_space = hypre_BoxArrayCreate(hypre_BoxArraySize(boxes)); hypre_ForBoxI(i, boxes) { box = hypre_BoxArrayBox(boxes, i); data_box = hypre_BoxArrayBox(data_space, i); hypre_CopyBox(box, data_box); for (d = 0; d < 3; d++) { hypre_BoxIMinD(data_box, d) -= num_ghost[2*d]; hypre_BoxIMaxD(data_box, d) += num_ghost[2*d + 1]; } } hypre_StructMatrixDataSpace(matrix) = data_space; } /*----------------------------------------------------------------------- * Set up data_indices array and data-size *-----------------------------------------------------------------------*/ if (hypre_StructMatrixDataIndices(matrix) == NULL) { data_space = hypre_StructMatrixDataSpace(matrix); data_indices = hypre_CTAlloc(HYPRE_Int *, hypre_BoxArraySize(data_space)); constant_coefficient = hypre_StructMatrixConstantCoefficient(matrix); data_size = 0; if ( constant_coefficient==0 ) { hypre_ForBoxI(i, data_space) { data_box = hypre_BoxArrayBox(data_space, i); data_box_volume = hypre_BoxVolume(data_box); data_indices[i] = hypre_CTAlloc(HYPRE_Int, 
stencil_size); /* set pointers for "stored" coefficients */ for (j = 0; j < stencil_size; j++) { if (symm_elements[j] < 0) { data_indices[i][j] = data_size; data_size += data_box_volume; } } /* set pointers for "symmetric" coefficients */ for (j = 0; j < stencil_size; j++) { if (symm_elements[j] >= 0) { data_indices[i][j] = data_indices[i][symm_elements[j]] + hypre_BoxOffsetDistance(data_box, stencil_shape[j]); } } } } else if ( constant_coefficient==1 ) { hypre_ForBoxI(i, data_space) { data_box = hypre_BoxArrayBox(data_space, i); data_box_volume = hypre_BoxVolume(data_box); data_indices[i] = hypre_CTAlloc(HYPRE_Int, stencil_size); /* set pointers for "stored" coefficients */ for (j = 0; j < stencil_size; j++) { if (symm_elements[j] < 0) { data_indices[i][j] = data_size; ++data_size; } } /* set pointers for "symmetric" coefficients */ for (j = 0; j < stencil_size; j++) { if (symm_elements[j] >= 0) { data_indices[i][j] = data_indices[i][symm_elements[j]]; } } } } else { hypre_assert( constant_coefficient == 2 ); data_size += stencil_size; /* all constant coeffs at the beginning */ /* ... 
this allocates a little more space than is absolutely necessary */ hypre_ForBoxI(i, data_space) { data_box = hypre_BoxArrayBox(data_space, i); data_box_volume = hypre_BoxVolume(data_box); data_indices[i] = hypre_CTAlloc(HYPRE_Int, stencil_size); /* set pointers for "stored" coefficients */ for (j = 0; j < stencil_size; j++) { if (symm_elements[j] < 0) { if ( hypre_IndexX(stencil_shape[j])==0 && hypre_IndexY(stencil_shape[j])==0 && hypre_IndexZ(stencil_shape[j])==0 ) /* diagonal, variable * coefficient */ { data_indices[i][j] = data_size; data_size += data_box_volume; } else /* off-diagonal, constant coefficient */ { data_indices[i][j] = j; } } } /* set pointers for "symmetric" coefficients */ for (j = 0; j < stencil_size; j++) { if (symm_elements[j] >= 0) { if ( hypre_IndexX(stencil_shape[j])==0 && hypre_IndexY(stencil_shape[j])==0 && hypre_IndexZ(stencil_shape[j])==0 ) /* diagonal, variable * coefficient */ { data_indices[i][j] = data_indices[i][symm_elements[j]] + hypre_BoxOffsetDistance(data_box, stencil_shape[j]); } else /* off-diagonal, constant coefficient */ { data_indices[i][j] = data_indices[i][symm_elements[j]]; } } } } } hypre_StructMatrixDataIndices(matrix) = data_indices; hypre_StructMatrixDataSize(matrix) = data_size; } /*----------------------------------------------------------------------- * Set total number of nonzero coefficients * For constant coefficients, this is unrelated to the amount of data * actually stored. 
*-----------------------------------------------------------------------*/ hypre_StructMatrixGlobalSize(matrix) = hypre_StructGridGlobalSize(grid) * stencil_size; /*----------------------------------------------------------------------- * Return *-----------------------------------------------------------------------*/ return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_StructMatrixInitializeData *--------------------------------------------------------------------------*/ HYPRE_Int hypre_StructMatrixInitializeData( hypre_StructMatrix *matrix, double *data ) { hypre_StructMatrixData(matrix) = data; hypre_StructMatrixDataAlloced(matrix) = 0; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_StructMatrixInitialize *--------------------------------------------------------------------------*/ HYPRE_Int hypre_StructMatrixInitialize( hypre_StructMatrix *matrix ) { double *data; hypre_StructMatrixInitializeShell(matrix); data = hypre_StructMatrixData(matrix); data = hypre_SharedCTAlloc(double, hypre_StructMatrixDataSize(matrix)); hypre_StructMatrixInitializeData(matrix, data); hypre_StructMatrixDataAlloced(matrix) = 1; return hypre_error_flag; } /*-------------------------------------------------------------------------- * (action > 0): add-to values * (action = 0): set values * (action < 0): get values * * should not be called to set a constant-coefficient part of the matrix, * call hypre_StructMatrixSetConstantValues instead *--------------------------------------------------------------------------*/ HYPRE_Int hypre_StructMatrixSetValues( hypre_StructMatrix *matrix, hypre_Index grid_index, HYPRE_Int num_stencil_indices, HYPRE_Int *stencil_indices, double *values, HYPRE_Int action, HYPRE_Int boxnum, HYPRE_Int outside ) { hypre_BoxArray *grid_boxes; hypre_Box *grid_box; hypre_Index center_index; hypre_StructStencil *stencil; HYPRE_Int center_rank; 
HYPRE_Int *symm_elements; HYPRE_Int constant_coefficient; double *matp; HYPRE_Int i, s, istart, istop; /*----------------------------------------------------------------------- * Initialize some things *-----------------------------------------------------------------------*/ constant_coefficient = hypre_StructMatrixConstantCoefficient(matrix); symm_elements = hypre_StructMatrixSymmElements(matrix); if (outside > 0) { grid_boxes = hypre_StructMatrixDataSpace(matrix); } else { grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(matrix)); } if (boxnum < 0) { istart = 0; istop = hypre_BoxArraySize(grid_boxes); } else { istart = boxnum; istop = istart + 1; } /*----------------------------------------------------------------------- * Set the matrix coefficients *-----------------------------------------------------------------------*/ for (i = istart; i < istop; i++) { grid_box = hypre_BoxArrayBox(grid_boxes, i); if ((hypre_IndexX(grid_index) >= hypre_BoxIMinX(grid_box)) && (hypre_IndexX(grid_index) <= hypre_BoxIMaxX(grid_box)) && (hypre_IndexY(grid_index) >= hypre_BoxIMinY(grid_box)) && (hypre_IndexY(grid_index) <= hypre_BoxIMaxY(grid_box)) && (hypre_IndexZ(grid_index) >= hypre_BoxIMinZ(grid_box)) && (hypre_IndexZ(grid_index) <= hypre_BoxIMaxZ(grid_box)) ) { if ( constant_coefficient==2 ) { hypre_SetIndex(center_index, 0, 0, 0); stencil = hypre_StructMatrixStencil(matrix); center_rank = hypre_StructStencilElementRank( stencil, center_index ); } for (s = 0; s < num_stencil_indices; s++) { /* only set stored stencil values */ if (symm_elements[stencil_indices[s]] < 0) { if ( (constant_coefficient==1) || (constant_coefficient==2 && stencil_indices[s]!=center_rank )) { /* call SetConstantValues instead */ hypre_error(HYPRE_ERROR_GENERIC); matp = hypre_StructMatrixBoxData(matrix, i, stencil_indices[s]); } else /* variable coefficient, constant_coefficient=0 */ { matp = hypre_StructMatrixBoxDataValue( matrix, i, stencil_indices[s], grid_index); } if (action > 0) { 
*matp += values[s]; } else if (action > -1) { *matp = values[s]; } else /* action < 0 */ { values[s] = *matp; } } } } } return hypre_error_flag; } /*-------------------------------------------------------------------------- * (action > 0): add-to values * (action = 0): set values * (action < 0): get values * (action =-2): get values and zero out * * should not be called to set a constant-coefficient part of the matrix, * call hypre_StructMatrixSetConstantValues instead *--------------------------------------------------------------------------*/ HYPRE_Int hypre_StructMatrixSetBoxValues( hypre_StructMatrix *matrix, hypre_Box *set_box, hypre_Box *value_box, HYPRE_Int num_stencil_indices, HYPRE_Int *stencil_indices, double *values, HYPRE_Int action, HYPRE_Int boxnum, HYPRE_Int outside ) { hypre_BoxArray *grid_boxes; hypre_Box *grid_box; hypre_Box *int_box; hypre_Index center_index; hypre_StructStencil *stencil; HYPRE_Int center_rank; HYPRE_Int *symm_elements; hypre_BoxArray *data_space; hypre_Box *data_box; hypre_IndexRef data_start; hypre_Index data_stride; HYPRE_Int datai; double *datap; HYPRE_Int constant_coefficient; hypre_Box *dval_box; hypre_Index dval_start; hypre_Index dval_stride; HYPRE_Int dvali; hypre_Index loop_size; HYPRE_Int i, s, istart, istop; /*----------------------------------------------------------------------- * Initialize some things *-----------------------------------------------------------------------*/ constant_coefficient = hypre_StructMatrixConstantCoefficient(matrix); symm_elements = hypre_StructMatrixSymmElements(matrix); if (outside > 0) { grid_boxes = hypre_StructMatrixDataSpace(matrix); } else { grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(matrix)); } data_space = hypre_StructMatrixDataSpace(matrix); if (boxnum < 0) { istart = 0; istop = hypre_BoxArraySize(grid_boxes); } else { istart = boxnum; istop = istart + 1; } /*----------------------------------------------------------------------- * Set the matrix coefficients 
*-----------------------------------------------------------------------*/ hypre_SetIndex(data_stride, 1, 1, 1); int_box = hypre_BoxCreate(); dval_box = hypre_BoxDuplicate(value_box); hypre_BoxIMinD(dval_box, 0) *= num_stencil_indices; hypre_BoxIMaxD(dval_box, 0) *= num_stencil_indices; hypre_BoxIMaxD(dval_box, 0) += num_stencil_indices - 1; hypre_SetIndex(dval_stride, num_stencil_indices, 1, 1); for (i = istart; i < istop; i++) { grid_box = hypre_BoxArrayBox(grid_boxes, i); data_box = hypre_BoxArrayBox(data_space, i); hypre_IntersectBoxes(set_box, grid_box, int_box); /* if there was an intersection */ if (hypre_BoxVolume(int_box)) { data_start = hypre_BoxIMin(int_box); hypre_CopyIndex(data_start, dval_start); hypre_IndexD(dval_start, 0) *= num_stencil_indices; if ( constant_coefficient==2 ) { hypre_SetIndex(center_index, 0, 0, 0); stencil = hypre_StructMatrixStencil(matrix); center_rank = hypre_StructStencilElementRank( stencil, center_index ); } for (s = 0; s < num_stencil_indices; s++) { /* only set stored stencil values */ if (symm_elements[stencil_indices[s]] < 0) { datap = hypre_StructMatrixBoxData(matrix, i, stencil_indices[s]); if ( (constant_coefficient==1) || (constant_coefficient==2 && stencil_indices[s]!=center_rank )) /* datap has only one data point for a given i and s */ { /* should have called SetConstantValues */ hypre_error(HYPRE_ERROR_GENERIC); hypre_BoxGetSize(int_box, loop_size); if (action > 0) { datai = hypre_CCBoxIndexRank(data_box,data_start); dvali = hypre_BoxIndexRank(dval_box,dval_start); datap[datai] += values[dvali]; } else if (action > -1) { datai = hypre_CCBoxIndexRank(data_box,data_start); dvali = hypre_BoxIndexRank(dval_box,dval_start); datap[datai] = values[dvali]; } else { datai = hypre_CCBoxIndexRank(data_box,data_start); dvali = hypre_BoxIndexRank(dval_box,dval_start); values[dvali] = datap[datai]; if (action == -2) { datap[datai] = 0; } } } else /* variable coefficient: constant_coefficient==0 or diagonal with 
constant_coefficient==2 */ { hypre_BoxGetSize(int_box, loop_size); if (action > 0) { hypre_BoxLoop2Begin(hypre_StructMatrixDim(matrix), loop_size, data_box,data_start,data_stride,datai, dval_box,dval_start,dval_stride,dvali); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,datai,dvali) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop2For(datai, dvali) { datap[datai] += values[dvali]; } hypre_BoxLoop2End(datai, dvali); } else if (action > -1) { hypre_BoxLoop2Begin(hypre_StructMatrixDim(matrix), loop_size, data_box,data_start,data_stride,datai, dval_box,dval_start,dval_stride,dvali); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,datai,dvali) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop2For(datai, dvali) { datap[datai] = values[dvali]; } hypre_BoxLoop2End(datai, dvali); } else { hypre_BoxLoop2Begin(hypre_StructMatrixDim(matrix), loop_size, data_box,data_start,data_stride,datai, dval_box,dval_start,dval_stride,dvali); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,datai,dvali) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop2For(datai, dvali) { values[dvali] = datap[datai]; if (action == -2) { datap[datai] = 0; } } hypre_BoxLoop2End(datai, dvali); } } } /* end if (symm_elements) */ hypre_IndexD(dval_start, 0) ++; } } } hypre_BoxDestroy(int_box); hypre_BoxDestroy(dval_box); return hypre_error_flag; } /*-------------------------------------------------------------------------- * (action > 0): add-to values * (action = 0): set values * (action < 0): get values * (action =-2): get values and zero out (not implemented, just gets values) * should be called to set a constant-coefficient part of the matrix *--------------------------------------------------------------------------*/ HYPRE_Int hypre_StructMatrixSetConstantValues( hypre_StructMatrix *matrix, HYPRE_Int num_stencil_indices, HYPRE_Int *stencil_indices, double *values, HYPRE_Int action ) { hypre_BoxArray *boxes; hypre_Box *box; hypre_Index 
center_index; hypre_StructStencil *stencil; HYPRE_Int center_rank; HYPRE_Int constant_coefficient; double *matp; HYPRE_Int i, s; boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(matrix)); constant_coefficient = hypre_StructMatrixConstantCoefficient(matrix); if ( constant_coefficient==1 ) { hypre_ForBoxI(i, boxes) { box = hypre_BoxArrayBox(boxes, i); if (action > 0) { for (s = 0; s < num_stencil_indices; s++) { matp = hypre_StructMatrixBoxData(matrix, i, stencil_indices[s]); *matp += values[s]; } } else if (action > -1) { for (s = 0; s < num_stencil_indices; s++) { matp = hypre_StructMatrixBoxData(matrix, i, stencil_indices[s]); *matp = values[s]; } } else /* action < 0 */ { for (s = 0; s < num_stencil_indices; s++) { matp = hypre_StructMatrixBoxData(matrix, i, stencil_indices[s]); values[s] = *matp; } } } } else if ( constant_coefficient==2 ) { hypre_SetIndex(center_index, 0, 0, 0); stencil = hypre_StructMatrixStencil(matrix); center_rank = hypre_StructStencilElementRank( stencil, center_index ); if ( action > 0 ) { for (s = 0; s < num_stencil_indices; s++) { if ( stencil_indices[s] == center_rank ) { /* center (diagonal), like constant_coefficient==0 We consider it an error, but do the best we can. */ hypre_error(HYPRE_ERROR_GENERIC); hypre_ForBoxI(i, boxes) { box = hypre_BoxArrayBox(boxes, i); hypre_StructMatrixSetBoxValues( matrix, box, box, num_stencil_indices, stencil_indices, values, action, -1, 0 ); } } else { /* non-center, like constant_coefficient==1 */ matp = hypre_StructMatrixBoxData(matrix, 0, stencil_indices[s]); *matp += values[s]; } } } else if ( action > -1 ) { for (s = 0; s < num_stencil_indices; s++) { if ( stencil_indices[s] == center_rank ) { /* center (diagonal), like constant_coefficient==0 We consider it an error, but do the best we can. 
*/ hypre_error(HYPRE_ERROR_GENERIC); hypre_ForBoxI(i, boxes) { box = hypre_BoxArrayBox(boxes, i); hypre_StructMatrixSetBoxValues( matrix, box, box, num_stencil_indices, stencil_indices, values, action, -1, 0 ); } } else { /* non-center, like constant_coefficient==1 */ matp = hypre_StructMatrixBoxData(matrix, 0, stencil_indices[s]); *matp += values[s]; } } } else /* action<0 */ { for (s = 0; s < num_stencil_indices; s++) { if ( stencil_indices[s] == center_rank ) { /* center (diagonal), like constant_coefficient==0 We consider it an error, but do the best we can. */ hypre_error(HYPRE_ERROR_GENERIC); hypre_ForBoxI(i, boxes) { box = hypre_BoxArrayBox(boxes, i); hypre_StructMatrixSetBoxValues( matrix, box, box, num_stencil_indices, stencil_indices, values, -1, -1, 0 ); } } else { /* non-center, like constant_coefficient==1 */ matp = hypre_StructMatrixBoxData(matrix, 0, stencil_indices[s]); values[s] = *matp; } } } } else /* constant_coefficient==0 */ { /* We consider this an error, but do the best we can. 
    */
   hypre_error(HYPRE_ERROR_GENERIC);
   hypre_ForBoxI(i, boxes)
      {
         box = hypre_BoxArrayBox(boxes, i);
         hypre_StructMatrixSetBoxValues( matrix, box, box,
                                         num_stencil_indices, stencil_indices,
                                         values, action, -1, 0 );
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_StructMatrixClearValues
 *
 * Zeroes the coefficients listed in stencil_indices at the single mesh
 * point grid_index, in every box (boxnum < 0) or in one box (boxnum >= 0).
 *
 * (outside > 0): clear values possibly outside of the grid extents
 * (outside = 0): clear values only inside the grid extents
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructMatrixClearValues( hypre_StructMatrix *matrix,
                               hypre_Index         grid_index,
                               HYPRE_Int           num_stencil_indices,
                               HYPRE_Int          *stencil_indices,
                               HYPRE_Int           boxnum,
                               HYPRE_Int           outside )
{
   hypre_BoxArray  *grid_boxes;
   hypre_Box       *grid_box;

   double          *matp;

   HYPRE_Int        i, s, istart, istop;

   /*-----------------------------------------------------------------------
    * Initialize some things
    *-----------------------------------------------------------------------*/

   /* outside > 0: iterate over the (larger) data space so that ghost-zone
      values can be cleared as well; otherwise only the grid boxes proper */
   if (outside > 0)
   {
      grid_boxes = hypre_StructMatrixDataSpace(matrix);
   }
   else
   {
      grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(matrix));
   }

   /* boxnum < 0 selects all boxes; otherwise exactly the requested box */
   if (boxnum < 0)
   {
      istart = 0;
      istop  = hypre_BoxArraySize(grid_boxes);
   }
   else
   {
      istart = boxnum;
      istop  = istart + 1;
   }

   /*-----------------------------------------------------------------------
    * Clear the matrix coefficients
    *-----------------------------------------------------------------------*/

   for (i = istart; i < istop; i++)
   {
      grid_box = hypre_BoxArrayBox(grid_boxes, i);

      /* only act on boxes that actually contain grid_index */
      if ((hypre_IndexX(grid_index) >= hypre_BoxIMinX(grid_box)) &&
          (hypre_IndexX(grid_index) <= hypre_BoxIMaxX(grid_box)) &&
          (hypre_IndexY(grid_index) >= hypre_BoxIMinY(grid_box)) &&
          (hypre_IndexY(grid_index) <= hypre_BoxIMaxY(grid_box)) &&
          (hypre_IndexZ(grid_index) >= hypre_BoxIMinZ(grid_box)) &&
          (hypre_IndexZ(grid_index) <= hypre_BoxIMaxZ(grid_box))   )
      {
         for (s = 0; s < num_stencil_indices; s++)
         {
            matp = hypre_StructMatrixBoxDataValue(matrix, i,
                                                  stencil_indices[s],
                                                  grid_index);
            *matp = 0.0;
         }
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_StructMatrixClearBoxValues
 *
 * Zeroes the coefficients listed in stencil_indices over the whole box
 * clear_box (intersected with each grid/data box).
 *
 * (outside > 0): clear values possibly outside of the grid extents
 * (outside = 0): clear values only inside the grid extents
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructMatrixClearBoxValues( hypre_StructMatrix *matrix,
                                  hypre_Box          *clear_box,
                                  HYPRE_Int           num_stencil_indices,
                                  HYPRE_Int          *stencil_indices,
                                  HYPRE_Int           boxnum,
                                  HYPRE_Int           outside )
{
   hypre_BoxArray  *grid_boxes;
   hypre_Box       *grid_box;
   hypre_Box       *int_box;

   HYPRE_Int       *symm_elements;
   hypre_BoxArray  *data_space;
   hypre_Box       *data_box;
   hypre_IndexRef   data_start;
   hypre_Index      data_stride;
   HYPRE_Int        datai;
   double          *datap;

   hypre_Index      loop_size;

   HYPRE_Int        i, s, istart, istop;

   /*-----------------------------------------------------------------------
    * Initialize some things
    *-----------------------------------------------------------------------*/

   if (outside > 0)
   {
      grid_boxes = hypre_StructMatrixDataSpace(matrix);
   }
   else
   {
      grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(matrix));
   }
   data_space = hypre_StructMatrixDataSpace(matrix);

   if (boxnum < 0)
   {
      istart = 0;
      istop  = hypre_BoxArraySize(grid_boxes);
   }
   else
   {
      istart = boxnum;
      istop  = istart + 1;
   }

   /*-----------------------------------------------------------------------
    * Clear the matrix coefficients
    *-----------------------------------------------------------------------*/

   hypre_SetIndex(data_stride, 1, 1, 1);

   symm_elements = hypre_StructMatrixSymmElements(matrix);

   int_box = hypre_BoxCreate();
   for (i = istart; i < istop; i++)
   {
      grid_box = hypre_BoxArrayBox(grid_boxes, i);
      data_box = hypre_BoxArrayBox(data_space, i);

      hypre_IntersectBoxes(clear_box, grid_box, int_box);

      /* if there was an intersection */
      if (hypre_BoxVolume(int_box))
      {
         data_start = hypre_BoxIMin(int_box);

         for (s = 0; s < num_stencil_indices; s++)
         {
            /* only clear stencil entries that are explicitly stored */
            if
 (symm_elements[stencil_indices[s]] < 0)
            {
               datap = hypre_StructMatrixBoxData(matrix, i,
                                                 stencil_indices[s]);

               hypre_BoxGetSize(int_box, loop_size);

               hypre_BoxLoop1Begin(hypre_StructMatrixDim(matrix), loop_size,
                                   data_box, data_start, data_stride, datai);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,datai) HYPRE_SMP_SCHEDULE
#endif
               hypre_BoxLoop1For(datai)
               {
                  datap[datai] = 0.0;
               }
               hypre_BoxLoop1End(datai);
            }
         }
      }
   }
   hypre_BoxDestroy(int_box);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_StructMatrixAssemble
 *
 * Finalizes the matrix: sets domain-boundary ghost zones to the identity,
 * builds the communication package if needed, and updates ghost data.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructMatrixAssemble( hypre_StructMatrix *matrix )
{
   HYPRE_Int            *num_ghost = hypre_StructMatrixNumGhost(matrix);

   HYPRE_Int             comm_num_values, mat_num_values, constant_coefficient;
   HYPRE_Int             stencil_size;
   hypre_StructStencil  *stencil;

   hypre_CommInfo       *comm_info;
   hypre_CommPkg        *comm_pkg;

   hypre_CommHandle     *comm_handle;

   HYPRE_Int             data_initial_offset = 0;
   double               *matrix_data = hypre_StructMatrixData(matrix);
   double               *matrix_data_comm = matrix_data;

   /* BEGIN - variables for ghost layer identity code below */
   hypre_StructGrid     *grid;
   hypre_BoxArray       *boxes;
   hypre_BoxManager     *boxman;
   hypre_BoxArray       *data_space;
   hypre_BoxArrayArray  *boundary_boxes;
   hypre_BoxArray       *boundary_box_a;
   hypre_BoxArray       *entry_box_a;
   hypre_BoxArray       *tmp_box_a;
   hypre_Box            *data_box;
   hypre_Box            *boundary_box;
   hypre_Box            *entry_box;
   hypre_BoxManEntry   **entries;
   hypre_IndexRef        periodic;
   hypre_Index           loop_size;
   hypre_Index           index;
   hypre_IndexRef        start;
   hypre_Index           stride;
   double               *datap;
   HYPRE_Int             i, j, ei, datai;
   HYPRE_Int             num_entries;
   /* End - variables for ghost layer identity code below */

   constant_coefficient = hypre_StructMatrixConstantCoefficient( matrix );

   /*-----------------------------------------------------------------------
    * Set ghost zones along the domain boundary to the identity to enable code
    * simplifications elsewhere in hypre (e.g.,
 CyclicReduction).
    *
    * Intersect each data box with the BoxMan to get neighbors, then subtract
    * the neighbors from the box to get the boundary boxes.
    *-----------------------------------------------------------------------*/

   if ( constant_coefficient!=1 )
   {
      data_space = hypre_StructMatrixDataSpace(matrix);
      grid       = hypre_StructMatrixGrid(matrix);
      boxes      = hypre_StructGridBoxes(grid);
      boxman     = hypre_StructGridBoxMan(grid);
      periodic   = hypre_StructGridPeriodic(grid);

      boundary_boxes = hypre_BoxArrayArrayCreate(hypre_BoxArraySize(data_space));
      entry_box_a    = hypre_BoxArrayCreate(0);
      tmp_box_a      = hypre_BoxArrayCreate(0);
      hypre_ForBoxI(i, data_space)
      {
         /* copy data box to boundary_box_a */
         boundary_box_a = hypre_BoxArrayArrayBoxArray(boundary_boxes, i);
         hypre_BoxArraySetSize(boundary_box_a, 1);
         boundary_box = hypre_BoxArrayBox(boundary_box_a, 0);
         hypre_CopyBox(hypre_BoxArrayBox(data_space, i), boundary_box);

         hypre_BoxManIntersect(boxman,
                               hypre_BoxIMin(boundary_box),
                               hypre_BoxIMax(boundary_box),
                               &entries , &num_entries);

         /* put neighbor boxes into entry_box_a */
         hypre_BoxArraySetSize(entry_box_a, num_entries);
         for (ei = 0; ei < num_entries; ei++)
         {
            entry_box = hypre_BoxArrayBox(entry_box_a, ei);
            hypre_BoxManEntryGetExtents(entries[ei],
                                        hypre_BoxIMin(entry_box),
                                        hypre_BoxIMax(entry_box));
         }
         hypre_TFree(entries);

         /* subtract neighbor boxes (entry_box_a) from data box (boundary_box_a) */
         hypre_SubtractBoxArrays(boundary_box_a, entry_box_a, tmp_box_a);
      }
      hypre_BoxArrayDestroy(entry_box_a);
      hypre_BoxArrayDestroy(tmp_box_a);

      /* set boundary ghost zones to the identity equation */

      hypre_SetIndex(index, 0, 0, 0);
      hypre_SetIndex(stride, 1, 1, 1);
      data_space = hypre_StructMatrixDataSpace(matrix);
      hypre_ForBoxI(i, data_space)
      {
         /* diagonal coefficient pointer; NULL when not stored in this box */
         datap = hypre_StructMatrixExtractPointerByIndex(matrix, i, index);

         if (datap)
         {
            data_box = hypre_BoxArrayBox(data_space, i);
            boundary_box_a = hypre_BoxArrayArrayBoxArray(boundary_boxes, i);
            hypre_ForBoxI(j, boundary_box_a)
            {
               boundary_box = hypre_BoxArrayBox(boundary_box_a, j);
               start = hypre_BoxIMin(boundary_box);

               hypre_BoxGetSize(boundary_box, loop_size);

               hypre_BoxLoop1Begin(hypre_StructMatrixDim(matrix), loop_size,
                                   data_box, start, stride, datai);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,datai) HYPRE_SMP_SCHEDULE
#endif
               hypre_BoxLoop1For(datai)
               {
                  datap[datai] = 1.0;
               }
               hypre_BoxLoop1End(datai);
            }
         }
      }

      hypre_BoxArrayArrayDestroy(boundary_boxes);
   }

   /*-----------------------------------------------------------------------
    * If the CommPkg has not been set up, set it up
    *
    * The matrix data array is assumed to have two segments - an initial
    * segment of data constant over all space, followed by a segment with
    * comm_num_values matrix entries for each mesh element.  The mesh-dependent
    * data is, of course, the only part relevent to communications.
    * For constant_coefficient==0, all the data is mesh-dependent.
    * For constant_coefficient==1, all data is constant.
    * For constant_coefficient==2, both segments are non-null.
    *-----------------------------------------------------------------------*/

   mat_num_values = hypre_StructMatrixNumValues(matrix);

   if ( constant_coefficient==0 )
   {
      comm_num_values = mat_num_values;
   }
   else if ( constant_coefficient==1 )
   {
      comm_num_values = 0;
   }
   else /* constant_coefficient==2 */
   {
      comm_num_values = 1;
      stencil = hypre_StructMatrixStencil(matrix);
      stencil_size = hypre_StructStencilSize(stencil);
      /* skip past the constant segment; only variable data is communicated */
      data_initial_offset = stencil_size;
      matrix_data_comm = &( matrix_data[data_initial_offset] );
   }

   comm_pkg = hypre_StructMatrixCommPkg(matrix);

   if (!comm_pkg)
   {
      hypre_CreateCommInfoFromNumGhost(hypre_StructMatrixGrid(matrix),
                                       num_ghost, &comm_info);
      hypre_CommPkgCreate(comm_info,
                          hypre_StructMatrixDataSpace(matrix),
                          hypre_StructMatrixDataSpace(matrix),
                          comm_num_values, NULL, 0,
                          hypre_StructMatrixComm(matrix), &comm_pkg);
      hypre_CommInfoDestroy(comm_info);

      /* cache the package on the matrix for reuse */
      hypre_StructMatrixCommPkg(matrix) = comm_pkg;
   }

   /*-----------------------------------------------------------------------
    * Update the ghost data
    * This takes care of the communication needs of all known functions
    * referencing the matrix.
    *
    * At present this is the only place where matrix data gets communicated.
    * However, comm_pkg is kept as long as the matrix is, in case some
    * future version hypre has a use for it - e.g. if the user replaces
    * a matrix with a very similar one, we may not want to recompute comm_pkg.
    *-----------------------------------------------------------------------*/

   if ( constant_coefficient!=1 )
   {
      hypre_InitializeCommunication( comm_pkg,
                                     matrix_data_comm,
                                     matrix_data_comm, 0, 0,
                                     &comm_handle );
      hypre_FinalizeCommunication( comm_handle );
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_StructMatrixSetNumGhost
 *
 * Copies the per-dimension lower/upper ghost-layer sizes (num_ghost has
 * 2*ndim entries: [lo0, up0, lo1, up1, ...]) into the matrix.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructMatrixSetNumGhost( hypre_StructMatrix *matrix,
                               HYPRE_Int          *num_ghost )
{
   HYPRE_Int  d, ndim = hypre_StructMatrixDim(matrix);

   for (d = 0; d < ndim; d++)
   {
      hypre_StructMatrixNumGhost(matrix)[2*d]     = num_ghost[2*d];
      hypre_StructMatrixNumGhost(matrix)[2*d + 1] = num_ghost[2*d + 1];
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_StructMatrixSetConstantCoefficient
 * deprecated in user interface, in favor of SetConstantEntries.
 * left here for internal use
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructMatrixSetConstantCoefficient( hypre_StructMatrix *matrix,
                                          HYPRE_Int           constant_coefficient )
{
   hypre_StructMatrixConstantCoefficient(matrix) = constant_coefficient;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_StructMatrixSetConstantEntries
 * - nentries is the number of array entries
 * - Each HYPRE_Int entries[i] is an index into the shape array of the stencil
 *   of the matrix
 * In the present version, only three possibilites are recognized:
 * - no entries constant                 (constant_coefficient==0)
 * - all entries constant                (constant_coefficient==1)
 * - all but the diagonal entry constant (constant_coefficient==2)
 * If something else is attempted, this function will return a nonzero error.
 * In the present version, if this function is called more than once, only
 * the last call will take effect.
 *--------------------------------------------------------------------------*/

HYPRE_Int  hypre_StructMatrixSetConstantEntries( hypre_StructMatrix *matrix,
                                                 HYPRE_Int           nentries,
                                                 HYPRE_Int          *entries )
{
   /* We make an array offdconst corresponding to the stencil's shape array,
      and use "entries" to fill it with flags - 1 for constant, 0 otherwise.
      By counting the nonzeros in offdconst, and by checking whether its
      diagonal entry is nonzero, we can distinguish among the three presently
      legal values of constant_coefficient, and detect input errors.
      We do not need to treat duplicates in "entries" as an error condition.
   */
   hypre_StructStencil *stencil = hypre_StructMatrixUserStencil(matrix);
   /* ... Stencil doesn't exist yet */
   HYPRE_Int stencil_size  = hypre_StructStencilSize(stencil);
   HYPRE_Int *offdconst = hypre_CTAlloc(HYPRE_Int, stencil_size);
   /* ... note: CTAlloc initializes to 0 (normally it works by calling calloc) */
   HYPRE_Int nconst = 0;
   HYPRE_Int constant_coefficient, diag_rank;
   hypre_Index diag_index;
   HYPRE_Int i, j;

   /* NOTE(review): entries[i] is not range-checked against stencil_size;
      an out-of-range index writes past offdconst — verify callers. */
   for ( i=0; i<nentries; ++i )
   {
      offdconst[ entries[i] ] = 1;
   }

   for ( j=0; j<stencil_size; ++j )
   {
      nconst += offdconst[j];
   }

   if ( nconst<=0 ) constant_coefficient=0;
   else if ( nconst>=stencil_size ) constant_coefficient=1;
   else
   {
      hypre_SetIndex(diag_index, 0, 0, 0);
      diag_rank = hypre_StructStencilElementRank( stencil, diag_index );
      if ( offdconst[diag_rank]==0 )
      {
         constant_coefficient=2;
         /* "all but diagonal" requires exactly stencil_size-1 constants */
         if ( nconst!=(stencil_size-1) )
         {
            hypre_error(HYPRE_ERROR_GENERIC);
         }
      }
      else
      {
         /* constant diagonal but variable off-diagonals: unsupported */
         constant_coefficient=0;
         hypre_error(HYPRE_ERROR_GENERIC);
      }
   }

   hypre_StructMatrixSetConstantCoefficient( matrix, constant_coefficient );

   hypre_TFree(offdconst);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_StructMatrixClearGhostValues
 *
 * Zeroes all explicitly-stored coefficients that lie in the ghost layer,
 * i.e. in the data space minus the grid box, for every box.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructMatrixClearGhostValues( hypre_StructMatrix *matrix )
{
   hypre_Box            *m_data_box;

   HYPRE_Int             mi;
   double               *mp;

   hypre_StructStencil  *stencil;
   HYPRE_Int            *symm_elements;
   hypre_BoxArray       *boxes;
   hypre_Box            *box;
   hypre_BoxArray       *diff_boxes;
   hypre_Box            *diff_box;
   hypre_Index           loop_size;
   hypre_IndexRef        start;
   hypre_Index           unit_stride;

   HYPRE_Int             i, j, s;

   /*-----------------------------------------------------------------------
    * Set the matrix coefficients
    *-----------------------------------------------------------------------*/

   hypre_SetIndex(unit_stride, 1, 1, 1);

   stencil = hypre_StructMatrixStencil(matrix);
   symm_elements = hypre_StructMatrixSymmElements(matrix);
   boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(matrix));
   diff_boxes = hypre_BoxArrayCreate(0);
   hypre_ForBoxI(i, boxes)
   {
      box        = hypre_BoxArrayBox(boxes, i);
      m_data_box = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(matrix), i);
      /* diff_boxes = data box minus grid box = the ghost region */
      hypre_BoxArraySetSize(diff_boxes, 0);
      hypre_SubtractBoxes(m_data_box, box, diff_boxes);

      for (s = 0; s < hypre_StructStencilSize(stencil); s++)
      {
         /* only clear stencil entries that are explicitly stored */
         if (symm_elements[s] < 0)
         {
            mp = hypre_StructMatrixBoxData(matrix, i, s);
            hypre_ForBoxI(j, diff_boxes)
            {
               diff_box = hypre_BoxArrayBox(diff_boxes, j);
               start = hypre_BoxIMin(diff_box);

               hypre_BoxGetSize(diff_box, loop_size);

               hypre_BoxLoop1Begin(hypre_StructMatrixDim(matrix), loop_size,
                                   m_data_box, start, unit_stride, mi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,mi ) HYPRE_SMP_SCHEDULE
#endif
               hypre_BoxLoop1For(mi)
               {
                  mp[mi] = 0.0;
               }
               hypre_BoxLoop1End(mi);
            }
         }
      }
   }
   hypre_BoxArrayDestroy(diff_boxes);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_StructMatrixPrint
 *
 * Writes the matrix to "<filename>.<myid>" in the hypre ASCII format read
 * back by hypre_StructMatrixRead; all != 0 prints the full data space.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructMatrixPrint( const char         *filename,
                         hypre_StructMatrix *matrix,
                         HYPRE_Int           all )
{
   FILE                 *file;
   char                  new_filename[255];

   hypre_StructGrid     *grid;
   hypre_BoxArray       *boxes;

   hypre_StructStencil  *stencil;
   hypre_Index          *stencil_shape;
   HYPRE_Int             stencil_size;
   hypre_Index           center_index;
HYPRE_Int num_values; hypre_BoxArray *data_space; HYPRE_Int *symm_elements; HYPRE_Int i, j; HYPRE_Int constant_coefficient; HYPRE_Int center_rank; HYPRE_Int myid; constant_coefficient = hypre_StructMatrixConstantCoefficient(matrix); /*---------------------------------------- * Open file *----------------------------------------*/ #ifdef HYPRE_USE_PTHREADS #if hypre_MPI_Comm_rank == hypre_thread_MPI_Comm_rank #undef hypre_MPI_Comm_rank #endif #endif hypre_MPI_Comm_rank(hypre_StructMatrixComm(matrix), &myid); hypre_sprintf(new_filename, "%s.%05d", filename, myid); if ((file = fopen(new_filename, "w")) == NULL) { hypre_printf("Error: can't open output file %s\n", new_filename); exit(1); } /*---------------------------------------- * Print header info *----------------------------------------*/ hypre_fprintf(file, "StructMatrix\n"); hypre_fprintf(file, "\nSymmetric: %d\n", hypre_StructMatrixSymmetric(matrix)); hypre_fprintf(file, "\nConstantCoefficient: %d\n", hypre_StructMatrixConstantCoefficient(matrix)); /* print grid info */ hypre_fprintf(file, "\nGrid:\n"); grid = hypre_StructMatrixGrid(matrix); hypre_StructGridPrint(file, grid); /* print stencil info */ hypre_fprintf(file, "\nStencil:\n"); stencil = hypre_StructMatrixStencil(matrix); stencil_shape = hypre_StructStencilShape(stencil); num_values = hypre_StructMatrixNumValues(matrix); symm_elements = hypre_StructMatrixSymmElements(matrix); hypre_fprintf(file, "%d\n", num_values); stencil_size = hypre_StructStencilSize(stencil); j = 0; for (i=0; i<stencil_size; i++) { if (symm_elements[i] < 0) { hypre_fprintf(file, "%d: %d %d %d\n", j++, hypre_IndexX(stencil_shape[i]), hypre_IndexY(stencil_shape[i]), hypre_IndexZ(stencil_shape[i])); } } /*---------------------------------------- * Print data *----------------------------------------*/ data_space = hypre_StructMatrixDataSpace(matrix); if (all) boxes = data_space; else boxes = hypre_StructGridBoxes(grid); hypre_fprintf(file, "\nData:\n"); if ( constant_coefficient==1 
) { hypre_PrintCCBoxArrayData(file, boxes, data_space, num_values, hypre_StructMatrixData(matrix)); } else if ( constant_coefficient==2 ) { hypre_SetIndex(center_index, 0, 0, 0); center_rank = hypre_StructStencilElementRank( stencil, center_index ); hypre_PrintCCVDBoxArrayData(file, boxes, data_space, num_values, center_rank, stencil_size, symm_elements, hypre_StructMatrixData(matrix)); } else { hypre_PrintBoxArrayData(file, boxes, data_space, num_values, hypre_StructMatrixData(matrix)); } /*---------------------------------------- * Close file *----------------------------------------*/ fflush(file); fclose(file); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_StructMatrixMigrate *--------------------------------------------------------------------------*/ HYPRE_Int hypre_StructMatrixMigrate( hypre_StructMatrix *from_matrix, hypre_StructMatrix *to_matrix ) { hypre_CommInfo *comm_info; hypre_CommPkg *comm_pkg; hypre_CommHandle *comm_handle; HYPRE_Int constant_coefficient, comm_num_values; HYPRE_Int stencil_size, mat_num_values; hypre_StructStencil *stencil; HYPRE_Int data_initial_offset = 0; double *matrix_data_from = hypre_StructMatrixData(from_matrix); double *matrix_data_to = hypre_StructMatrixData(to_matrix); double *matrix_data_comm_from = matrix_data_from; double *matrix_data_comm_to = matrix_data_to; /*------------------------------------------------------ * Set up hypre_CommPkg *------------------------------------------------------*/ constant_coefficient = hypre_StructMatrixConstantCoefficient( from_matrix ); hypre_assert( constant_coefficient == hypre_StructMatrixConstantCoefficient( to_matrix ) ); mat_num_values = hypre_StructMatrixNumValues(from_matrix); hypre_assert( mat_num_values = hypre_StructMatrixNumValues(to_matrix) ); if ( constant_coefficient==0 ) { comm_num_values = mat_num_values; } else if ( constant_coefficient==1 ) { comm_num_values = 0; } else /* constant_coefficient==2 */ { 
comm_num_values = 1; stencil = hypre_StructMatrixStencil(from_matrix); stencil_size = hypre_StructStencilSize(stencil); hypre_assert(stencil_size == hypre_StructStencilSize( hypre_StructMatrixStencil(to_matrix) ) ); data_initial_offset = stencil_size; matrix_data_comm_from = &( matrix_data_from[data_initial_offset] ); matrix_data_comm_to = &( matrix_data_to[data_initial_offset] ); } hypre_CreateCommInfoFromGrids(hypre_StructMatrixGrid(from_matrix), hypre_StructMatrixGrid(to_matrix), &comm_info); hypre_CommPkgCreate(comm_info, hypre_StructMatrixDataSpace(from_matrix), hypre_StructMatrixDataSpace(to_matrix), comm_num_values, NULL, 0, hypre_StructMatrixComm(from_matrix), &comm_pkg); hypre_CommInfoDestroy(comm_info); /* is this correct for periodic? */ /*----------------------------------------------------------------------- * Migrate the matrix data *-----------------------------------------------------------------------*/ if ( constant_coefficient!=1 ) { hypre_InitializeCommunication( comm_pkg, matrix_data_comm_from, matrix_data_comm_to, 0, 0, &comm_handle ); hypre_FinalizeCommunication( comm_handle ); } hypre_CommPkgDestroy(comm_pkg); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_StructMatrixRead *--------------------------------------------------------------------------*/ hypre_StructMatrix * hypre_StructMatrixRead( MPI_Comm comm, const char *filename, HYPRE_Int *num_ghost ) { FILE *file; char new_filename[255]; hypre_StructMatrix *matrix; hypre_StructGrid *grid; hypre_BoxArray *boxes; HYPRE_Int dim; hypre_StructStencil *stencil; hypre_Index *stencil_shape; HYPRE_Int stencil_size, real_stencil_size; HYPRE_Int num_values; hypre_BoxArray *data_space; HYPRE_Int symmetric; HYPRE_Int constant_coefficient; HYPRE_Int i, idummy; HYPRE_Int myid; /*---------------------------------------- * Open file *----------------------------------------*/ #ifdef HYPRE_USE_PTHREADS #if hypre_MPI_Comm_rank == 
hypre_thread_MPI_Comm_rank #undef hypre_MPI_Comm_rank #endif #endif hypre_MPI_Comm_rank(comm, &myid ); hypre_sprintf(new_filename, "%s.%05d", filename, myid); if ((file = fopen(new_filename, "r")) == NULL) { hypre_printf("Error: can't open output file %s\n", new_filename); exit(1); } /*---------------------------------------- * Read header info *----------------------------------------*/ hypre_fscanf(file, "StructMatrix\n"); hypre_fscanf(file, "\nSymmetric: %d\n", &symmetric); hypre_fscanf(file, "\nConstantCoefficient: %d\n", &constant_coefficient); /* read grid info */ hypre_fscanf(file, "\nGrid:\n"); hypre_StructGridRead(comm,file,&grid); /* read stencil info */ hypre_fscanf(file, "\nStencil:\n"); dim = hypre_StructGridDim(grid); hypre_fscanf(file, "%d\n", &stencil_size); if (symmetric) { real_stencil_size = 2*stencil_size-1; } else { real_stencil_size = stencil_size; } /* ... real_stencil_size is the stencil size of the matrix after it's fixed up by the call (if any) of hypre_StructStencilSymmetrize from hypre_StructMatrixInitializeShell.*/ stencil_shape = hypre_CTAlloc(hypre_Index, stencil_size); for (i = 0; i < stencil_size; i++) { hypre_fscanf(file, "%d: %d %d %d\n", &idummy, &hypre_IndexX(stencil_shape[i]), &hypre_IndexY(stencil_shape[i]), &hypre_IndexZ(stencil_shape[i])); } stencil = hypre_StructStencilCreate(dim, stencil_size, stencil_shape); /*---------------------------------------- * Initialize the matrix *----------------------------------------*/ matrix = hypre_StructMatrixCreate(comm, grid, stencil); hypre_StructMatrixSymmetric(matrix) = symmetric; hypre_StructMatrixConstantCoefficient(matrix) = constant_coefficient; hypre_StructMatrixSetNumGhost(matrix, num_ghost); hypre_StructMatrixInitialize(matrix); /*---------------------------------------- * Read data *----------------------------------------*/ boxes = hypre_StructGridBoxes(grid); data_space = hypre_StructMatrixDataSpace(matrix); num_values = hypre_StructMatrixNumValues(matrix); 
hypre_fscanf(file, "\nData:\n"); if ( constant_coefficient==0 ) { hypre_ReadBoxArrayData(file, boxes, data_space, num_values, hypre_StructMatrixData(matrix)); } else { hypre_assert( constant_coefficient<=2 ); hypre_ReadBoxArrayData_CC( file, boxes, data_space, stencil_size, real_stencil_size, constant_coefficient, hypre_StructMatrixData(matrix)); } /*---------------------------------------- * Assemble the matrix *----------------------------------------*/ hypre_StructMatrixAssemble(matrix); /*---------------------------------------- * Close file *----------------------------------------*/ fclose(file); return matrix; }
main_github.c
#include <stdio.h> #include <math.h> #include <gsl/gsl_sf_bessel.h> #include "global.h" #include "utility.h" #include "constants.h" #include <gsl/gsl_cblas.h> #include <time.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include <omp.h> #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> #include "steppmethods.h" #define NUMBER_SAMPLES 1 #define NUMBER_VOLUMES 1 #define SHIFT_LATE 200 #define BINS 100 #define PDF_NR 12 // regular main but with multiple sampling runs over multiple volume fractions phi. int main (void){ FILE *source, *target; source = fopen("global.h", "r"); // get source global.h at program start so as to not copy edits during run if( source == NULL ) { printf("cannot open global.h\n"); exit(EXIT_FAILURE); } SETNUMTHREADS int i,j,l,k,m ,n; Constants(); // draw coupling constants and other derived constants double *t = calloc(LENGTH_T, sizeof(double)); time_t tme = time(NULL); char ordnerLABEL[300]; char ueberordnerLABEL[300]; char ordnerLABEL_original[300]; strftime(ordnerLABEL, sizeof ordnerLABEL, "%A %c", localtime(&tme)); double squares_all_calcs[LENGTH_T][NUMBER_SAMPLES]; // for printing all squares in shared table i = 0; printf("\n%s\n", ordnerLABEL); deleteSpaces(ordnerLABEL,ordnerLABEL); strcpy (ordnerLABEL_original, ordnerLABEL); // keep original, change ordnerLABEL per run double alpha_global = ALPHA; double temp_global = TEMP; // macros from global make trouble when used as print arguments, hence copy to memory sprintf(ueberordnerLABEL,"N=%d_SIM=%d_T=%1.1E_ALPH=%1.1f", OSSZI, SIMULATIONEN, temp_global, alpha_global); char tempLABEL[300]; strcpy (tempLABEL, ordnerLABEL); strcat(tempLABEL, LABELPREFIX); strcat(tempLABEL, ueberordnerLABEL); strcpy (ueberordnerLABEL, tempLABEL); struct stat st2 = {0}; double prob_density[BINS][PDF_NR]; for (i = 0; i < BINS ; i++) { for (j = 0; j < PDF_NR; j++) { prob_density[i][j] = 0.0; } } int check_times[PDF_NR]; for (j = 0; j < PDF_NR; j++) { check_times[j] = (int) ( 10.0 * (j+1)) ; 
} check_times[PDF_NR-1] = LENGTH_T - 1; for (j = 0; j < PDF_NR; j++) { printf("check times nr %d is %d \n", j, check_times[j]); } printf("\nprint results into "); printf(ueberordnerLABEL); if (stat(ueberordnerLABEL, &st2) == -1) { //Teste ob Ordner existiert, erstelle Ordner mkdir(ueberordnerLABEL, 0700); } if (chdir(ueberordnerLABEL)) // change into directory of simulation { printf("Error changing directory"); return 1; } // reserviere speicher fuer eine Vollständige trajektorie ------------------ m = LENGTH_T; n = ORDER; double **z = (double **) malloc(m * sizeof(double *)); z[0] = (double *) malloc(m* n * sizeof(double)); for (i=1; i<m; i++) z[i] = z[0] + n *i; double *squares = calloc(LENGTH_T, sizeof(double)); double *short_correlation_x = calloc(LENGTH_T, sizeof(double)); //short term (1 timestep) correlations double *short_correlation_y = calloc(LENGTH_T, sizeof(double)); double *total_momentum_x = calloc(LENGTH_T, sizeof(double)); double *total_momentum_y = calloc(LENGTH_T, sizeof(double)); double *long_correlation_x = calloc(LENGTH_T, sizeof(double)); //longterm correlation from first timestep double *long_correlation_y = calloc(LENGTH_T, sizeof(double)); double *squares_increase = calloc(LENGTH_T, sizeof(double)); // Increase a from assumed form x^2 0 = a t double real_volumes[10]; double *bathp = calloc(LENGTH_T, sizeof(double)); double *bathq = calloc(LENGTH_T, sizeof(double)); double *px_correlation = calloc(LENGTH_T, sizeof(double)); // save correlation of impulse_x to px(t=0) double *py_correlation = calloc(LENGTH_T, sizeof(double)); double *px_correlation_late = calloc(LENGTH_T, sizeof(double)); // corelation after time SHIFT_LATE double *px_correlation_late2 = calloc(LENGTH_T, sizeof(double)); // corelation after time SHIFT_LATE int n_zplots = SIMULATIONEN; if (SIMULATIONEN > MAX_NR_PLOTS ) n_zplots = MAX_NR_PLOTS; // speichere maximal 30*DIM trajectorien // reserviere speicher fuer samplepfade massives Teilchen zum plotten ------------------ m = 
LENGTH_T; n = DIM*n_zplots; double **zplots = (double **) malloc(m * sizeof(double *)); zplots[0] = (double *) malloc(m* n * sizeof(double)); for (i=1; i<m; i++) zplots[i] = zplots[0] + n *i; // reserviere speicher fuer samplepfade 1. bad Teilchen zum plotten ------------------ m = LENGTH_T; n = DIM*n_zplots; double **qplots = (double **) malloc(m * sizeof(double *)); qplots[0] = (double *) malloc(m* n * sizeof(double)); for (i=1; i<m; i++) qplots[i] = qplots[0] + n *i; // reserviere speicher fuer samplepfade letztes bad Teilchen zum plotten ------------------ m = LENGTH_T; n = DIM*n_zplots; double **qlplots = (double **) malloc(m * sizeof(double *)); qlplots[0] = (double *) malloc(m* n * sizeof(double)); for (i=1; i<m; i++) qlplots[i] = qlplots[0] + n *i; // reserviere speicher fuer samplepfade zusätzlicher Kac_Zwanzig Kraft Term ------------------ m = LENGTH_T; n = DIM*n_zplots; double **Zwanzig_Force = (double **) malloc(m * sizeof(double *)); Zwanzig_Force[0] = (double *) malloc(m* n * sizeof(double)); for (i=1; i<m; i++) Zwanzig_Force[i] = Zwanzig_Force[0] + n *i; // reserviere speicher fuer probability Density ------------------ m = 80; n = 80; double **P_density = (double **) malloc(m * sizeof(double *)); P_density[0] = (double *) malloc(m* n * sizeof(double)); for (i=1; i<m; i++) P_density[i] = P_density[0] + n *i; // reserviere speicher fuer Gitterkoordinaten ------------------ double DENSITY_POINTS = 0.0; for(m=0; m<80; m++) { for(n=0; n<80; n++) { P_density[m][n] = 0.0; } } double *EKIN = calloc(LENGTH_T, sizeof(double)); double *EBAD = calloc(LENGTH_T, sizeof(double)); double *ETOT = calloc(LENGTH_T, sizeof(double)); double *PTOT = calloc(LENGTH_T, sizeof(double)); double *PTOTY = calloc(LENGTH_T, sizeof(double)); double *LTOT = calloc(LENGTH_T, sizeof(double)); // GSL random number Setup for Taus Generator const gsl_rng_type * T; gsl_rng * r; gsl_rng_env_setup(); T = gsl_rng_ranlxd2; r = gsl_rng_alloc (T); gsl_rng_set(r, time(NULL)); // Seed with time 
// RNG setup End // // GSL random number Setup for Taus Generator for global rand const gsl_rng_type * T2; T2 = gsl_rng_ranlxd2; RAND_GLOBAL = gsl_rng_alloc (T2); gsl_rng_set(RAND_GLOBAL, time(NULL)); // Seed with time // RNG setup End // clock_t start, end; printf("evenly spread t \n"); //set up time vec for (i = 0; i < 10; i++) // first ten values in small steps { t[i] = i * TIME_STEPS/10.0; } for (i = 10; i < LENGTH_T; i++) { t[i] = (i-9) * TIME_STEPS; } double squarespace = 0.0; DIFF_COEFF = KBOLTZ * TEMP/GAMMA * sin(M_PI * ALPHA ) / (M_PI * ALPHA); if(ALPHA > 0.95) { DIFF_COEFF = KBOLTZ * TEMP/GAMMA; } LATTICE_SPACING = sqrt(2*DIM*DIFF_COEFF * pow( TIME_ARRIVAL, ALPHA) ); printf("LATTICE_SPACING = %3.3E\n", LATTICE_SPACING); double latlength = LATTICE_SPACING; //------------------------------EIGENTLICHE SIMULATION ------------------------------------------------------------------ int sim_count = 0; start = clock(); time_first_contact = 0.0; int error_count = 0; // for (i = 0; i < SIMULATIONEN; i++) { vec_zero_i(lattice_position,DIM); // set stating cell to zero ! 
TARGET_CONTACT = 0; FLAG_DUMP_VALUE = 0; // set to 1 if error occures // -------------------------BAD Bath_Setup(y, LABEL, r); // draw intial conditions //---------------------Bad aufgesetzt VVerlet_parallel(ORDER, y, z, t, Poti_Handle); end = clock(); //------------------------------auswertung Integrationsergebnisse if ( !(FLAG_DUMP_VALUE)) // only count result without err { double *kahan_c = calloc(LENGTH_T, sizeof(double)); // double *kahan_t = calloc(LENGTH_T, sizeof(double)); double *kahan_y = calloc(LENGTH_T, sizeof(double)); #pragma omp parallel for for(l = 0; l < LENGTH_T; l++) { if (l>10) { long_correlation_x[l] += z[1][0] *z[l][0]/((double) SIMULATIONEN); long_correlation_y[l] += z[1][1] *z[l][1]/((double) SIMULATIONEN); } kahan_y[l] = z[10][DIM] * z[l][DIM] / ((double) SIMULATIONEN) / (mass * KBOLTZ * TEMP)\ - kahan_c[l]; kahan_t[l] = px_correlation[l] + kahan_y[l]; kahan_c[l] = (kahan_y[l] - px_correlation[l]) - kahan_y[l]; px_correlation[l] = kahan_t[l]; py_correlation[l] += z[10][DIM + 1] * z[l][DIM + 1] / ((double) SIMULATIONEN) / (mass * KBOLTZ * TEMP); px_correlation_late[l] += z[SHIFT_LATE][DIM] * z[l][DIM] / ((double) SIMULATIONEN) / (mass * KBOLTZ * TEMP); px_correlation_late2[l] += z[SHIFT_LATE * 2][DIM] * z[l][DIM] / ((double) SIMULATIONEN) / (mass * KBOLTZ * TEMP); for(j=0; j< DIM; j++) { if (i < n_zplots) { zplots[l][DIM * i + j] = z[l][j]; qplots[l][DIM * i + j] = z[l][2*DIM + j]; double Force_TEMP = 0.0; for(int os = 0; os < OSSZI; os++) { Force_TEMP += (pow(coupling[os],2.0)/pow(ommega[os],2.0) - coupling[os]) * z[l][(2 + 2 * os) * DIM + j]; } Zwanzig_Force[l][DIM * i + j]=Force_TEMP; int i_last = OSSZI -1; qlplots[l][DIM * i + j] = z[l][2*OSSZI*DIM + j]; } squares[l] = squares[l] + pow(z[l][j],2.0)/((double) SIMULATIONEN); EKIN[l] = EKIN[l] + pow(z[l][DIM+j],2.0)/(mass * SIMULATIONEN * KBOLTZ *TEMP * DIM); for(k=0;k<OSSZI;k++) { bathq[l] = bathq[l] + pow(z[l][(2 + 2*k) *DIM +j],2.0)/( (double) (OSSZI*SIMULATIONEN)\ *KBOLTZ*TEMP) * 
pow(ommega[k],2.0)/2.0;// mittlere potentielle E pro Badteilchen //reskaliert durch kT bathp[l] = bathp[l] + pow(z[l][(3 + 2*k) *DIM +j],2.0)/( (double) (OSSZI*SIMULATIONEN)\ *KBOLTZ*TEMP) /2.0/massq[k]; // mittlere kinetische E pro Badteilchen //reskaliert durch kT EBAD[l] = EBAD[l] + pow(z[l][(3 + 2*k) *DIM +j], 2.0)/2.0\ + 0.5 * pow(ommega[k],2.0) \ * pow(z[l][(2 + 2*k) *DIM +j] - coupling[k]/(pow(ommega[k],2.0) ) * z[l][j] , 2.0) /((double) SIMULATIONEN); } } for(k=0;k<OSSZI;k++) // add p and angular momentum L for bath { PTOT[l] += z[l][(3 + 2*k) *DIM + 0]; PTOTY[l] += z[l][(3 + 2*k) *DIM + 1]; LTOT[l] += (z[l][(2 + 2*k) *DIM + 0] * z[l][(3 + 2*k) *DIM + 1]\ - z[l][(2 + 2*k) *DIM + 1] * z[l][(3 + 2*k) *DIM + 0]) /((double) SIMULATIONEN); } PTOT[l] += z[l][2]; PTOT[l] += z[l][3]; LTOT[l] += (z[l][0] * z[l][3] - z[l][1] * z[l][2]) / ((double) SIMULATIONEN); } if(DIM == 1) { // setup pdf checks double bins = BINS * 1.0; double dx = LATTICE_SPACING/bins; double sims = SIMULATIONEN; for (j = 0; j < PDF_NR; j++) { int t_check = check_times[j]; for (int i_bin = 0; i_bin < BINS; i_bin++) { double lower = -LATTICE_SPACING / 2.0 + i_bin * dx; double upper = -LATTICE_SPACING / 2.0 + (i_bin + 1) * dx; if ( ( z[t_check][0] <= upper) && ( z[t_check][0] > lower) ) { prob_density[i_bin][j] += 1.0/sims; } } } } sim_count += 1; //printf("%d und t %4.2f \n", i ,((double) (end - start))); printf("\r%d von %d mit t/count = %4.2f s und average t_rest =%4.2f h ", sim_count , SIMULATIONEN, (double) (end - start) / sim_count/ THREADS / CLOCKS_PER_SEC ,\ ((double) (end - start) / sim_count/ THREADS/ CLOCKS_PER_SEC * (SIMULATIONEN - sim_count)/3600)); printf(" %d hits on Lat and avrgtcntct = %4.2f",TARGET_CONTACT, time_first_contact/(i+1) ); fflush(stdout); for(l = 0; l < LENGTH_T; l++) { for (int oss_i=0; oss_i < OSSZI; oss_i++) { total_momentum_x[l] += z[l][(3 + 2*oss_i) *DIM + 0]; total_momentum_y[l] += z[l][(3 + 2*oss_i) *DIM + 1]; } total_momentum_x[l] += z[l][(0 + 2*0) *DIM + 0]; 
total_momentum_y[l] += z[l][(0 + 2*0) *DIM + 1]; } // -------------- end evaluation if---------------------- free(kahan_y); free(kahan_t); free(kahan_c); }else { error_count++; printf("\n err occured at calc %d, calculation dumped, %d totatl errors\n", i,error_count ); i -= 1; // do one more calculation } } tme = time(NULL); char end_label[90]; strftime( end_label, sizeof end_label, "%A %c", localtime(&tme)); printf("\n%s\n", end_label); for(l = 0; l < LENGTH_T; l++) { ETOT[l] = EKIN[l] + EBAD[l]; PTOT[l] = sqrt(pow(PTOT[l]/((double) SIMULATIONEN),2.0) + pow(PTOTY[l]/((double) SIMULATIONEN),2.0)); } for(l = 1; l < LENGTH_T; l++) { squares_increase[l] = (squares[l]- squares[l-1])/(t[l] - t[l-1]); } //------------------------------ENDE SIMULATION, speichere daten ------------------------------------------------------------------ for(m=0; m<80; m++) { for(n=0; n<80; n++) { P_density[m][n] *= 1.0/DENSITY_POINTS; } } FILE *fp; struct stat st = {0}; if (stat(ordnerLABEL, &st) == -1){ //Teste ob Ordner existiert, erstelle Ordner mkdir(ordnerLABEL, 0700); } fp = fopen ("shellscript.sh", "w"); //create Shellscript to start Gnuplot from c main() fprintf(fp, "cd %s\n", ordnerLABEL); fprintf(fp, "gnuplot gnuplot.txt\n"); fprintf(fp, "cd .."); fclose (fp); // copy global.h / // copy to same name into directory, clould be anything different if (chdir(ordnerLABEL)) // change into directory of simulation { printf("Error changing directory"); return 1; } mkdir("plots", 0700); mkdir("trajec", 0700); target = fopen("global.h", "w"); if( target == NULL ) { fclose(source); printf("Press any key to exit...\n"); exit(EXIT_FAILURE); } char ch; while( ( ch = fgetc(source) ) != EOF ) { fputc(ch, target); } printf("File copied successfully.\n"); fclose(target); fp = fopen ("latlength.dat", "w"); fprintf(fp, "%lf \n", latlength); fclose (fp); fp = fopen ("squares_rohdaten.dat", "w"); for(l = 0; l < LENGTH_T; l++){ fprintf(fp, "%1.3E %1.3E\n", t[l], squares[l]); } fclose (fp); fp = fopen 
("ekin.dat", "w"); for(l = 1; l < LENGTH_T; l++){ fprintf(fp, "%lf %lf\n", t[l]/TIME_ARRIVAL, EKIN[l]); } fclose (fp); fp = fopen ("ekinbath.dat", "w"); for(l = 1; l < LENGTH_T; l++){ fprintf(fp, "%lf %lf\n", t[l]/TIME_ARRIVAL, bathp[l]); } fclose (fp); fp = fopen ("ommega.dat", "w"); for(l = 0; l < OSSZI; l++){ fprintf(fp, "%lf\n", ommega[l]); } fclose (fp); fp = fopen ("PTOT.dat", "w"); for(l = 0; l < LENGTH_T; l++){ fprintf(fp, "%lf %lf\n", t[l]/TIME_ARRIVAL, PTOT[l]); } fclose (fp); fp = fopen ("P_X.dat", "w"); for(l = 0; l < LENGTH_T; l++){ fprintf(fp, "%lf %1.3e\n", t[l]/TIME_ARRIVAL, total_momentum_x[l]); } fclose (fp); fp = fopen ("P_Y.dat", "w"); for(l = 0; l < LENGTH_T; l++){ fprintf(fp, "%lf %1.3e\n", t[l]/TIME_ARRIVAL, total_momentum_y[l]); } fclose (fp); fp = fopen ("xshortcorellation.dat", "w"); for(l = 1; l < LENGTH_T; l++){ fprintf(fp, "%lf %lf\n", t[l]/TIME_ARRIVAL, short_correlation_x[l]/latlength/latlength); } fclose (fp); fp = fopen ("yshortcorellation.dat", "w"); for(l = 1; l < LENGTH_T; l++){ fprintf(fp, "%lf %lf\n", t[l]/TIME_ARRIVAL, short_correlation_y[l]/latlength/latlength); } fclose (fp); fp = fopen ("xlongcorellation.dat", "w"); for(l = 1; l < LENGTH_T; l++){ fprintf(fp, "%lf %lf\n", t[l]/TIME_ARRIVAL, long_correlation_x[l]/latlength/latlength); } fclose (fp); fp = fopen ("ylongcorellation.dat", "w"); for(l = 1; l < LENGTH_T; l++){ fprintf(fp, "%lf %lf\n", t[l]/TIME_ARRIVAL, long_correlation_y[l]/latlength/latlength); } fclose (fp); fp = fopen ("PX_corr.dat", "w"); for(l = 1; l < LENGTH_T; l++){ fprintf(fp, "%1.3E %1.3E\n", t[l]/TIME_ARRIVAL, px_correlation[l]); } fclose (fp); fp = fopen ("PY_corr.dat", "w"); for(l = 1; l < LENGTH_T; l++){ fprintf(fp, "%1.3E %1.3E\n", t[l]/TIME_ARRIVAL, py_correlation[l]); } fclose (fp); fp = fopen ("PX_corr_late.dat", "w"); for(l = 1; l < LENGTH_T; l++){ fprintf(fp, "%1.3E %1.3E\n", t[l]/TIME_ARRIVAL, px_correlation_late[l]); } fclose (fp); fp = fopen ("PX_corr_late2.dat", "w"); for(l = 1; l < 
LENGTH_T; l++){ fprintf(fp, "%1.3E %1.3E\n", t[l]/TIME_ARRIVAL, px_correlation_late2[l]); } fclose (fp); fp = fopen ("PX_corr_abs.dat", "w"); for(l = 1; l < LENGTH_T; l++){ fprintf(fp, "%1.3E %1.3E\n", t[l]/TIME_ARRIVAL, fabs(px_correlation[l])); } fclose (fp); fp = fopen ("PX_corr_late_abs.dat", "w"); for(l = 1; l < LENGTH_T; l++){ fprintf(fp, "%1.3E %1.3E\n", t[l]/TIME_ARRIVAL, fabs(px_correlation_late[l])); } fclose (fp); fp = fopen ("PX_corr_late2_abs.dat", "w"); for(l = 1; l < LENGTH_T; l++){ fprintf(fp, "%1.3E %1.3E\n", t[l]/TIME_ARRIVAL, fabs(px_correlation_late2[l])); } fclose (fp); if (DIM == 1) { fp = fopen ("PDF_1D.dat", "w"); for (int pos = 0; pos < BINS; pos++) { fprintf(fp, "%1.3E ", -LATTICE_SPACING/2.0 + pos*LATTICE_SPACING/((double) BINS)); for(j = 0; j < PDF_NR; j++) { fprintf(fp, "%1.3E ", prob_density[pos][j]); } fprintf(fp,"\n"); } fclose (fp); } // finde größtes s^2/t^alpha zum endzeitpunkt int t_start = 10; double temp_ende = 0.0; for(int i_alpha = 1; i_alpha < 12; i_alpha ++){ double alpha_temp = 0.45 + 0.05 * i_alpha; l = LENGTH_T-1; if (temp_ende < (squares[l] / (pow(t[l], alpha_temp))) ) { temp_ende = squares[l] / (pow(t[l], alpha_temp)); } } if (DIM>1) { char zplotsLABEL[30]; for (i = 0; i < n_zplots; i++) { sprintf(zplotsLABEL, "trajec/zplots%d.dat",i); fp = fopen (zplotsLABEL, "w"); for(l = 0; l < LENGTH_T; l++){ for (j = 0; j < DIM ; j++){ fprintf(fp, "%lf ", ((zplots[l][j +i*DIM])/latlength) ); } fprintf(fp, "\n"); } fclose (fp); } for (i = 0; i < n_zplots; i++) { sprintf(zplotsLABEL, "trajec/qplots%d.dat",i); fp = fopen (zplotsLABEL, "w"); for(l = 0; l < LENGTH_T; l++){ for (j = 0; j < DIM ; j++){ fprintf(fp, "%lf ", ((qplots[l][j +i*DIM])) ); } fprintf(fp, "\n"); } fclose (fp); } for (i = 0; i < n_zplots; i++) { sprintf(zplotsLABEL, "trajec/Zwanzig_Force%d.dat",i); fp = fopen (zplotsLABEL, "w"); for(l = 0; l < LENGTH_T; l++){ for (j = 0; j < DIM ; j++){ fprintf(fp, "%lf ", ((Zwanzig_Force[l][j +i*DIM])) ); } fprintf(fp, "\n"); } 
fclose (fp); } for (i = 0; i < n_zplots; i++) { sprintf(zplotsLABEL, "trajec/qlplots%d.dat",i); fp = fopen (zplotsLABEL, "w"); for(l = 0; l < LENGTH_T; l++){ for (j = 0; j < DIM ; j++){ fprintf(fp, "%lf ", ((qlplots[l][j +i*DIM])) ); } fprintf(fp, "\n"); } fclose (fp); } } if (DIM==1) { char zplotsLABEL[30]; for (i = 0; i < n_zplots; i++) { sprintf(zplotsLABEL, "trajec/zplots%d.dat",i); fp = fopen (zplotsLABEL, "w"); for(l = 0; l < LENGTH_T; l++){ fprintf(fp, " %lf %lf \n" ,t[l]/TIME_ARRIVAL, zplots[l][i*DIM]/latlength); } fclose (fp); } } fp = fopen ("ETOT.dat", "w"); for(l = 0; l < LENGTH_T; l++){ fprintf(fp, "%lf %lf\n", t[l], ETOT[l]); } fclose (fp); fp = fopen ("DENSITY.dat", "w"); for(m=0; m<80; m++) { for(n=0; n<80; n++) { fprintf(fp, "%1.2e ", P_density[m][n]); } fprintf(fp, "\n"); } fclose (fp); gsl_rng_free (r); gsl_rng_free (RAND_GLOBAL); free(zplots[0]); free(zplots); free(qplots[0]); free(qplots); free(Zwanzig_Force[0]); free(Zwanzig_Force); free(qlplots[0]); free(qlplots); free (P_density[0]); free (P_density); free(t); free(bathp); free(bathq); free(squares_increase); free(short_correlation_x); free(total_momentum_x); free(total_momentum_y); free(short_correlation_y); free(long_correlation_x); free(px_correlation); free(py_correlation); free(px_correlation_late); free(px_correlation_late2); free(long_correlation_y); free(ETOT); free(EKIN); free(EBAD); free(PTOT); free(PTOTY);free(LTOT); free(squares); free(z[0]); free(z); }
// ===== repeat_base.h =====
// ========================================================================== // SeqAn - The Library for Sequence Analysis // ========================================================================== // Copyright (c) 2006-2016, Knut Reinert, FU Berlin // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of Knut Reinert or the FU Berlin nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY // OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
// // ========================================================================== // Author: David Weese <david.weese@fu-berlin.de> // ========================================================================== #ifndef SEQAN_HEADER_REPEAT_BASE_H #define SEQAN_HEADER_REPEAT_BASE_H #if SEQAN_ENABLE_PARALLELISM #include <seqan/parallel.h> #endif // #if SEQAN_ENABLE_PARALLELISM namespace seqan { /*! * @class Repeat * @headerfile <seqan/index.h> * @brief Store information about a repeat. * * @signature template <typename TPos, typename TPeriod> * struct Repeat; * * @tparam TPeriod Type to use for storing the repeat period. Default: 1 * @tparam TPos Type to use for storing positions. * * @see findRepeats * * @var TPos Repeat::endPosition; * @brief The end position of the repeat of type <tt>TPos</tt>. * * @var TPos Repeat::beginPosition; * @brief The begin position of the repeat of type <tt>TPos</tt>. * * @var TPeriod Repeat::period; * @brief The period of the repeat of type <tt>TPeriod</tt>. */ template <typename TPos, typename TPeriod> struct Repeat { TPos beginPosition; TPos endPosition; TPeriod period; }; template <typename TPos, typename TPeriod> struct Value< Repeat<TPos, TPeriod> > { typedef TPos Type; }; template <typename TPos, typename TPeriod> struct Size< Repeat<TPos, TPeriod> > { typedef TPeriod Type; }; template <typename TSize> struct RepeatFinderParams { TSize minRepeatLen; TSize maxPeriod; }; // custom TSpec for our customized wotd-Index struct TRepeatFinder; template <typename TText> struct Cargo<Index<TText, IndexWotd<TRepeatFinder> > > { typedef Index<TText, IndexWotd<TRepeatFinder> > TIndex; typedef typename Size<TIndex>::Type TSize; typedef RepeatFinderParams<TSize> Type; }; // node predicate template <typename TText, typename TSpec> bool nodePredicate(Iter<Index<TText, IndexWotd<TRepeatFinder> >, TSpec> &it) { // return countOccurrences(it) * nodeDepth(it) >= cargo(container(it)).minRepeatLen; return countOccurrences(it) * repLength(it) >= 
cargo(container(it)).minRepeatLen; } // monotonic hull template <typename TText, typename TSpec> bool nodeHullPredicate(Iter<Index<TText, IndexWotd<TRepeatFinder> >, TSpec> &it) { // return nodeDepth(it) <= cargo(container(it)).maxPeriod; return repLength(it) <= cargo(container(it)).maxPeriod; } template <typename TPos> struct RepeatLess_ : public std::binary_function<TPos, TPos, bool> { // key less inline bool operator() (TPos const &a, TPos const &b) const { return posLess(a, b); } }; template <typename TValue> inline bool _repeatMaskValue(TValue const &) { // TODO(holtgrew): Maybe use unknownValue<TValue>() instead of specializing for all alphabets, especially since we have Rna5 now and might want Rna5Q later. return false; } template <> inline bool _repeatMaskValue(Dna5 const &val) { return val == unknownValue<Dna5>(); // 'N' } template <> inline bool _repeatMaskValue(Dna5Q const &val) { return val == unknownValue<Dna5Q>(); // 'N' } template <> inline bool _repeatMaskValue(Iupac const &val) { return val == unknownValue<Iupac>(); // 'N' } /* template <> inline bool _repeatMaskValue(AminoAcid val) { return val == 'X'; } */ /*! * @fn findRepeats * @headerfile <seqan/index.h> * @brief Search for repeats in a text. * * @signature void findRepeats(repeatString, text, minRepeatLength[, maxPeriod]); * * @param[out] repeatString A @link String @endlink of @link Repeat @endlink objects. * @param[in] text The text to search repeats in. Types: @link ContainerConcept @endlink * @param[in] minRepeatLength The minimum length each reported repeat must have. * @param[in] maxPeriod Optionally, the maximal period that reported repeats can have. Default: 1 * * Subsequences of undefined values/<tt>N</tt>s will always be reported. * * @section Examples * * The following demonstrates finding repeats of period 3. 
 *
 * @include demos/dox/index/find_repeats.cpp
 *
 * @code{.console}
 * # of repeats: 15
 * i == 0, beginPosition = 3, endPosition = 7, period = 1
 * i == 1, beginPosition = 46, endPosition = 53, period = 1
 * i == 2, beginPosition = 101, endPosition = 105, period = 1
 * i == 3, beginPosition = 105, endPosition = 109, period = 1
 * i == 4, beginPosition = 164, endPosition = 169, period = 1
 * i == 5, beginPosition = 291, endPosition = 297, period = 1
 * i == 6, beginPosition = 319, endPosition = 327, period = 1
 * i == 7, beginPosition = 400, endPosition = 404, period = 1
 * i == 8, beginPosition = 442, endPosition = 446, period = 1
 * i == 9, beginPosition = 468, endPosition = 473, period = 1
 * i == 10, beginPosition = 476, endPosition = 480, period = 1
 * i == 11, beginPosition = 507, endPosition = 513, period = 1
 * i == 12, beginPosition = 561, endPosition = 566, period = 1
 * i == 13, beginPosition = 623, endPosition = 627, period = 1
 * i == 14, beginPosition = 655, endPosition = 659, period = 1
 * @endcode
 *
 * @see AlphabetWithUnknownValueConcept#unknownValue
 * @see Repeat
 */

// TODO(holtgrew): minRepeatLength is 1-off.

// period-1 optimization
// Finds runs of a single repeated character (period 1).  When OpenMP is
// enabled and the text is long enough, each thread scans one chunk and the
// per-chunk results are merged ("mended") at the chunk borders afterwards.
template <typename TRepeatStore, typename TString, typename TRepeatSize>
inline void findRepeats(TRepeatStore &repString, TString const &text, TRepeatSize minRepeatLen)
{
    typedef typename Value<TRepeatStore>::Type TRepeat;
    typedef typename Iterator<TString const>::Type TIterator;
    typedef typename Size<TString>::Type TSize;

#if SEQAN_ENABLE_PARALLELISM
    typedef typename Value<TString>::Type TValue;

    if (length(text) > (TSize)(omp_get_max_threads() * 2 * minRepeatLen))
    {
        // Parallel case.

        // NOTE(holtgrew): The minimum text length check above makes it impossible that more than two chunks are
        // required to form an otherwise too short repeat.

        // TODO(holtgrew): Load balancing?  Probably not worth it.
        String<TSize> splitters;
        String<TRepeatStore> threadLocalStores;

        // Each threads finds repeats on its chunk in parallel.
        #pragma omp parallel
        {
            // We have to determine the number of available threads at this point.  We will use the number of thread
            // local stores to determin the number of available threads later on.
            #pragma omp master
            {
                computeSplitters(splitters, length(text), omp_get_num_threads());
                resize(threadLocalStores, omp_get_num_threads());
            }  // end of #pragma omp master
            #pragma omp barrier

            int const t = omp_get_thread_num();
            TRepeatStore & store = threadLocalStores[t];

            TRepeat rep;
            rep.beginPosition = 0;
            rep.endPosition = 0;
            rep.period = 1;

            // Flags used for force-adding repeats for the chunks that have a left/right neighbour,
            // so border-spanning runs can be stitched together in the mending phase below.
            bool forceFirst = t > 0;
            bool forceLast = (t + 1) < omp_get_num_threads();

            TIterator it = iter(text, splitters[t], Standard());
            TIterator itEnd = iter(text, splitters[t + 1], Standard());
            if (it != itEnd)
            {
                TValue last = *it;
                TSize repLeft = 0;   // chunk-local begin of the current run
                TSize repRight = 1;  // chunk-local end (one past) of the current run

                for (++it; it != itEnd; ++it, ++repRight)
                {
                    if (*it != last)
                    {
                        // Run ended: record it if masked ('N'), long enough, or forced (first run of a chunk).
                        if (_repeatMaskValue(last) || (TRepeatSize)(repRight - repLeft) > minRepeatLen || forceFirst)
                        {
                            forceFirst = false;
                            // insert repeat
                            rep.beginPosition = splitters[t] + repLeft;
                            rep.endPosition = splitters[t] + repRight;
                            appendValue(store, rep);
                        }
                        repLeft = repRight;
                        last = *it;
                    }
                }
                // Handle the run that reaches the chunk's right border.
                if (_repeatMaskValue(last) || (TRepeatSize)(repRight - repLeft) > minRepeatLen || forceLast)
                {
                    // Insert repeat but only if it is not already in there.
                    // NOTE(review): this duplicate check compares the chunk-local offsets repLeft/repRight
                    // against positions stored with the splitters[t] shift — looks inconsistent for t > 0;
                    // confirm against upstream SeqAn before relying on it.
                    if (empty(store) || (back(store).beginPosition != repLeft && back(store).endPosition != repRight))
                    {
                        rep.beginPosition = splitters[t] + repLeft;
                        rep.endPosition = splitters[t] + repRight;
                        appendValue(store, rep);
                    }
                }
            }
        }  // end of #pragma omp parallel

        // Mend the splice points.
        //
        // We will copy out infixes described by fromPositions.
        String<Pair<TSize> > fromPositions;
        resize(fromPositions, length(threadLocalStores));
        for (unsigned i = 0; i < length(fromPositions); ++i)
        {
            fromPositions[i].i1 = 0;
            fromPositions[i].i2 = length(threadLocalStores[i]);
        }

        // First, merge repeats spanning blocks.  Do this iteratively until all has been merged.
        bool anyChange;
        do
        {
            anyChange = false;
            int lastNonEmpty = -1;
            for (unsigned i = 0; i < length(threadLocalStores); ++i)
            {
                if (fromPositions[i].i1 == fromPositions[i].i2)
                    continue;  // Skip empty buckets.
                if (lastNonEmpty != -1)
                {
                    // Two border repeats belong together iff they touch and repeat the same character.
                    bool const adjacent = back(threadLocalStores[lastNonEmpty]).endPosition == front(threadLocalStores[i]).beginPosition;
                    bool const charsEqual = text[back(threadLocalStores[lastNonEmpty]).beginPosition] == text[front(threadLocalStores[i]).beginPosition];
                    if (adjacent && charsEqual)
                    {
                        anyChange = true;
                        back(threadLocalStores[lastNonEmpty]).endPosition = front(threadLocalStores[i]).endPosition;
                        fromPositions[i].i1 += 1;
                    }
                }
                if (fromPositions[i].i1 != fromPositions[i].i2)
                    lastNonEmpty = i;
            }
        }
        while (anyChange);

        // Then, remove any repeats in the beginning and end of blocks that are too short.
        for (unsigned i = 0; i < length(threadLocalStores); ++i)
        {
            if (fromPositions[i].i1 == fromPositions[i].i2)
                continue;
            unsigned j = fromPositions[i].i1;
            TRepeatSize len = threadLocalStores[i][j].endPosition - threadLocalStores[i][j].beginPosition;
            if (!_repeatMaskValue(text[threadLocalStores[i][j].beginPosition]) &&  // Never remove mask value.
                len <= minRepeatLen)
                fromPositions[i].i1 += 1;
            if (fromPositions[i].i1 == fromPositions[i].i2)
                continue;
            j = fromPositions[i].i2 - 1;
            len = threadLocalStores[i][j].endPosition - threadLocalStores[i][j].beginPosition;
            if (!_repeatMaskValue(text[threadLocalStores[i][j].beginPosition]) &&  // Never remove mask value.
                len <= minRepeatLen)
                fromPositions[i].i2 -= 1;
        }

        // Last, build splitters for output in parallel.
        String<unsigned> outSplitters;
        appendValue(outSplitters, 0);
        for (unsigned i = 0; i < length(threadLocalStores); ++i)
            appendValue(outSplitters, back(outSplitters) + fromPositions[i].i2 - fromPositions[i].i1);

        // Allocate memory.
        clear(repString);
        resize(repString, back(outSplitters));

        // Copy back the repeats in parallel.
        unsigned nt = length(threadLocalStores);
        (void) nt;  // Otherwise, GCC 4.6 warns, does not see it used in pragma clause below.
        #pragma omp parallel num_threads(nt)
        {
            int const t = omp_get_thread_num();
            arrayCopy(iter(threadLocalStores[t], fromPositions[t].i1, Standard()),
                      iter(threadLocalStores[t], fromPositions[t].i2, Standard()),
                      iter(repString, outSplitters[t], Standard()));
        }  // end of #pragma omp parallel
    }
    else
    {
#endif  // #if SEQAN_ENABLE_PARALLELISM
    // Sequential case.
    TRepeat rep;
    rep.period = 1;
    clear(repString);

    TIterator it = begin(text, Standard());
    TIterator itEnd = end(text, Standard());
    if (it == itEnd) return;

    TSize repLen = 1;  // length of the current single-character run
    for (++it; it != itEnd; ++it)
    {
        if (*it != *(it-1))
        {
            if (_repeatMaskValue(*(it-1)) || repLen > (TSize)minRepeatLen)
            {
                // insert repeat
                rep.endPosition = it - begin(text, Standard());
                rep.beginPosition = rep.endPosition - repLen;
                appendValue(repString, rep);
            }
            repLen = 1;
        }
        else
            ++repLen;
    }
    // Flush the run that reaches the end of the text.
    if (_repeatMaskValue(*(it-1)) || repLen > (TSize)minRepeatLen)
    {
        // insert repeat
        rep.endPosition = length(text);
        rep.beginPosition = rep.endPosition - repLen;
        appendValue(repString, rep);
    }
#if SEQAN_ENABLE_PARALLELISM
    }
#endif  // #if SEQAN_ENABLE_PARALLELISM
}

// TODO(holtgrew): Why for TString const and StringSet<> const?
// Period-1 repeat search over every sequence of a StringSet; positions are
// stored as (sequence index, local offset) pairs via .i1/.i2.
template <typename TRepeatStore, typename TString, typename TSpec, typename TRepeatSize>
inline void findRepeats(TRepeatStore &repString, StringSet<TString, TSpec> const &text, TRepeatSize minRepeatLen)
{
    typedef typename Value<TRepeatStore>::Type TRepeat;
    typedef typename Iterator<TString>::Type TIterator;
    typedef typename Value<TString>::Type TValue;
    typedef typename Size<TString>::Type TSize;

    TRepeat rep;
    rep.period = 1;
    clear(repString);
    for (unsigned i = 0; i < length(text); ++i)
    {
        TIterator it = begin(text[i], Standard());
        TIterator itEnd = end(text[i], Standard());
        if (it == itEnd) continue;  // skip empty sequences

        TValue last = *it;
        TSize repLeft = 0;   // begin of the current run within sequence i
        TSize repRight = 1;  // one past its end
        rep.beginPosition.i1 = i;
        rep.endPosition.i1 = i;

        for (++it; it != itEnd; ++it, ++repRight)
        {
            if (last != *it)
            {
                if (_repeatMaskValue(last) || (TRepeatSize)(repRight - repLeft) > minRepeatLen)
                {
                    // insert repeat
                    rep.beginPosition.i2 = repLeft;
                    rep.endPosition.i2 = repRight;
                    appendValue(repString, rep);
                }
                repLeft = repRight;
                last = *it;
            }
        }
        // Flush the run that reaches the end of sequence i.
        if (_repeatMaskValue(last) || (TRepeatSize)(repRight - repLeft) > minRepeatLen)
        {
            // insert repeat
            rep.beginPosition.i2 = repLeft;
            rep.endPosition.i2 = repRight;
            appendValue(repString, rep);
        }
    }
}

// main function
// General repeat search for periods up to maxPeriod, driven by a wotd index
// whose traversal is pruned through nodePredicate/nodeHullPredicate (see the
// Cargo specialization above).  Results are collected in a std::map keyed by
// begin position so repString comes out sorted.
template <typename TRepeatStore, typename TText, typename TRepeatSize, typename TPeriodSize>
void findRepeats(TRepeatStore &repString, TText const &text, TRepeatSize minRepeatLen, TPeriodSize maxPeriod)
{
    typedef Index<TText, IndexWotd<TRepeatFinder> > TIndex;
    typedef typename Size<TIndex>::Type TSize;
    typedef typename Iterator<TIndex, TopDown<ParentLinks<> > >::Type TNodeIterator;
    typedef typename Fibre<TIndex, FibreSA>::Type const TSA;
    typedef typename Infix<TSA>::Type TOccString;
    typedef typename Iterator<TOccString>::Type TOccIterator;

    typedef typename Value<TRepeatStore>::Type TRepeat;
    typedef typename Value<TOccString>::Type TOcc;

    typedef std::map<TOcc,TRepeat,RepeatLess_<TOcc> > TRepeatList;

    if (maxPeriod < 1) return;
    if (maxPeriod == 1)
    {
        // Delegate to the specialized (and parallelizable) period-1 search.
        findRepeats(repString, text, minRepeatLen);
        return;
    }

    TIndex index(text);
    TRepeatList list;

    // set repeat finder parameters
    cargo(index).minRepeatLen = minRepeatLen;
    cargo(index).maxPeriod = maxPeriod;

    TNodeIterator nodeIt(index);
    TOccIterator itA, itB, itRepBegin, itEnd;
    TRepeat rep;
    for (; !atEnd(nodeIt); goNext(nodeIt))
    {
        if (isRoot(nodeIt)) continue;

        // get occurrences
        TOccString occ = getOccurrences(nodeIt);
        itA = begin(occ, Standard());
        itEnd = end(occ, Standard());
        itRepBegin = itB = itA;

        TSize repLen = repLength(nodeIt);  // representative length
        // Skip nodes whose representative alone already reaches minRepeatLen;
        // otherwise minLen below (minRepeatLen - repLen) would underflow.
        // NOTE(review): TSize is presumably unsigned here — confirm.
        if ((TSize)minRepeatLen <= repLen) continue;

        TSize diff, period = 0;  // period of current repeat
        TSize repeatLen = 0;     // overall length of current repeat
        TSize minLen = minRepeatLen - repLen;  // minimum repeat length minus length of representative

        // Occurrences come sorted; a run of equally-spaced occurrences of the
        // representative (spacing == period) constitutes one repeat.
        for (++itB; itB != itEnd; ++itB)
        {
            diff = posSub(*itB, *itA);
            if (diff != period || getSeqNo(*itA) != getSeqNo(*itB))
            {
                // is the repeat long enough?
                if (repeatLen >= minLen)
                    // is the repeat self overlapping or connected?
                    if (parentRepLength(nodeIt) < period && period <= repLen)
                    {
                        // insert repeat
                        rep.beginPosition = *itRepBegin;
                        rep.endPosition = posAdd(*itA, period);
                        rep.period = period;
                        list.insert(std::pair<TOcc,TRepeat>(rep.beginPosition, rep));
                    }
                itRepBegin = itA;
                period = diff;
                repeatLen = 0;
            }
            repeatLen += period;
            itA = itB;
        }

        // is the last repeat long enough?
        if (repeatLen >= minLen)
            // is the repeat self overlapping or connected?
            if (parentRepLength(nodeIt) < period && period <= repLen)
            {
                // insert repeat
                rep.beginPosition = *itRepBegin;
                rep.endPosition = posAdd(*itA, period);
                rep.period = period;
                list.insert(std::pair<TOcc,TRepeat>(rep.beginPosition, rep));
            }
    }

    // copy low-complex regions to result string
    clear(repString);
    reserve(repString, list.size(), Exact());
    typename TRepeatList::const_iterator lit = list.begin();
    typename TRepeatList::const_iterator litEnd = list.end();
    for (TSize i = 0; lit != litEnd; ++lit, ++i)
        appendValue(repString, (*lit).second);
}

}  // namespace seqan

#endif
// ===== im2col_dnnlowp.h =====
#pragma once

#ifdef _OPENMP
#include <omp.h>
#endif

#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include "caffe2/utils/math/utils.h"

namespace caffe2 {

namespace math {

// Unpacks image patches into columns for NCHW-layout convolution-as-GEMM.
// Out-of-bounds (padding) positions are written as `zero_point` rather than 0,
// which matters for quantized (dnnlowp) data where the zero point is nonzero.
// Output buffer data_col must hold
// channels * kernel_h * kernel_w * output_h * output_w elements.
template <typename T>
static void Im2ColNCHW(
    const int channels,
    const int height,
    const int width,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int pad_b,
    const int pad_r,
    const int stride_h,
    const int stride_w,
    const T* data_im,
    T* data_col,
    CPUContext* /*context*/,
    const T& zero_point = 0) {
  const int output_h =
      (height + pad_b + pad_t - (dilation_h * (kernel_h - 1) + 1)) / stride_h +
      1;
  const int output_w =
      (width + pad_l + pad_r - (dilation_w * (kernel_w - 1) + 1)) / stride_w +
      1;

  // Fast path for zero padding and no dilation
  // From Torch, THNN_(unfolded_copy)
  if (dilation_h == 1 && dilation_w == 1 && pad_l == 0 && pad_r == 0 &&
      pad_t == 0 && pad_b == 0) {
    // k enumerates (channel, kernel row, kernel col) triples; each triple
    // fills one output_h * output_w plane of data_col.
    for (auto k = 0; k < channels * kernel_h * kernel_w; k++) {
      const auto nip = k / (kernel_h * kernel_w);
      const auto rest = k % (kernel_h * kernel_w);
      const auto kh = rest / kernel_w;
      const auto kw = rest % kernel_w;
      auto* dst = data_col + nip * (kernel_h * kernel_w * output_h * output_w) +
          kh * (kernel_w * output_h * output_w) + kw * (output_h * output_w);
      const auto* src = data_im + nip * (height * width);
      for (const auto y : c10::irange(output_h)) {
        const auto iy = y * stride_h + kh;
        const auto ix = kw;
        if (stride_w == 1) {
          // Contiguous row: one bulk copy.
          memcpy(
              dst + (y * output_w),
              src + (iy * width + ix),
              sizeof(T) * output_w);
        } else {
          // Strided row: copy element by element.
          for (const auto x : c10::irange(output_w)) {
            memcpy(
                dst + (y * output_w + x),
                src + (iy * width + ix + x * stride_w),
                sizeof(T));
          }
        }
      }
    }
    return;
  }

  // Fast path for equal padding
  if (pad_l == pad_r && pad_t == pad_b) {
    // From Intel, https://github.com/BVLC/caffe/pull/3536
    const int pad_h = pad_t;
    const int pad_w = pad_l;
    const int channel_size = height * width;
    for (int channel = channels; channel--; data_im += channel_size) {
      for (const auto kernel_row : c10::irange(kernel_h)) {
        for (const auto kernel_col : c10::irange(kernel_w)) {
          int input_row = -pad_h + kernel_row * dilation_h;
          for (int output_rows = output_h; output_rows; output_rows--) {
            if (!utils::IsAGeZeroAndALtB(input_row, height)) {
              // Whole output row falls in vertical padding.
              for (int output_cols = output_w; output_cols; output_cols--) {
                *(data_col++) = zero_point;
              }
            } else {
              int input_col = -pad_w + kernel_col * dilation_w;
              for (int output_col = output_w; output_col; output_col--) {
                if (utils::IsAGeZeroAndALtB(input_col, width)) {
                  *(data_col++) = data_im[input_row * width + input_col];
                } else {
                  *(data_col++) = zero_point;
                }
                input_col += stride_w;
              }
            }
            input_row += stride_h;
          }
        }
      }
    }
    return;
  }

  // Baseline
  const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
  const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
  int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
  int channels_col = channels * kernel_h * kernel_w;
  for (const auto c : c10::irange(channels_col)) {
    int w_offset = c % kernel_w;
    int h_offset = (c / kernel_w) % kernel_h;
    int c_im = c / kernel_h / kernel_w;
    for (const auto h : c10::irange(height_col)) {
      for (const auto w : c10::irange(width_col)) {
        int h_pad = h * stride_h - pad_t + h_offset * dilation_h;
        int w_pad = w * stride_w - pad_l + w_offset * dilation_w;
        if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width)
          data_col[(c * height_col + h) * width_col + w] =
              data_im[(c_im * height + h_pad) * width + w_pad];
        else
          data_col[(c * height_col + h) * width_col + w] = zero_point;
      }
    }
  }
}

// N-dimensional generalization of Im2ColNCHW. Padding positions are filled
// with `zero_point`. img_shape has N+1 entries (channels followed by N
// spatial dims); col_shape[0] is the outer (channels * kernel volume) size.
template <typename T>
static void Im2ColNdNCHW(
    const int N,
    const int /* img_size*/,
    const int col_size,
    const int* img_shape,
    const int* col_shape,
    const int* kernel_shape,
    const int* stride,
    const int* dilation,
    const int* pad,
    const T* X_data,
    T* Y_data,
    CPUContext* /* context */,
    const T& zero_point = 0) {
  const int outer_size = col_shape[0];
  const int inner_size = col_size / outer_size;
  const int kernel_size = std::accumulate(
      kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
  std::vector<int> d_offset(N, 0);
  std::vector<int> d_iter(N, 0);
  for (const auto i : c10::irange(outer_size)) {
    // Loop over spatial axes in reverse order to compute a per-axis offset.
    int offset = i;
    for (int d_i = N - 1; d_i >= 0; --d_i) {
      d_offset[d_i] = offset % kernel_shape[d_i];
      offset /= kernel_shape[d_i];
    }
    for (const auto j : c10::irange(inner_size)) {
      // Loop over spatial axes in forward order to compute the indices in the
      // image and column, and whether the index lies in the padding.
      const int col_index = i * inner_size + j;
      int img_index = i / kernel_size;
      bool is_padding = false;
      for (const auto d_i : c10::irange(N)) {
        const int d_img = d_iter[d_i] * stride[d_i] - pad[d_i] +
            d_offset[d_i] * dilation[d_i];
        is_padding |= d_img < 0 || d_img >= img_shape[d_i + 1];
        img_index = img_index * img_shape[d_i + 1] + d_img;
      }
      Y_data[col_index] = is_padding ? zero_point : X_data[img_index];
      utils::IncreaseIndexInDims(N, col_shape + 1, d_iter.data());
    }
  }
}

/**
 * The layout of the result is N H W G R S C/G.
 * Note that groups are pulled out to an outer dimension so that we can use
 * GEMMs efficiently.
 */
template <typename T>
static void Im2ColNHWC(
    const int channels,
    const int height,
    const int width,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int pad_b,
    const int pad_r,
    const int stride_h,
    const int stride_w,
    const T* data_im,
    T* data_col,
    CPUContext* /*context*/,
    const int groups,
    const T& zero_point) {
  const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
  const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
  int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;

  // Rows of the output are independent, so parallelize over output rows
  // (unless we are already inside a parallel region).
#ifdef _OPENMP
#pragma omp parallel for if (!omp_in_parallel())
#endif
  for (int h = 0; h < height_col; ++h) {
    int h_pad = -pad_t + h * stride_h;
    T* data_col_temp =
        data_col + h * width_col * kernel_h * kernel_w * channels;
    int w_pad = -pad_l;
    for (C10_UNUSED const auto w : c10::irange(width_col)) {
      int r = 0;
      for (int ih = h_pad; ih < h_pad + dkernel_h; ih += dilation_h, ++r) {
        int s = 0;
        for (int iw = w_pad; iw < w_pad + dkernel_w; iw += dilation_w, ++s) {
          if (ih >= 0 && ih < height && iw >= 0 && iw < width) {
            // Copy one (channels / groups)-wide slice per group.
            for (const auto g : c10::irange(groups)) {
              memcpy(
                  data_col_temp +
                      ((g * kernel_h + r) * kernel_w + s) *
                          (channels / groups),
                  data_im + (ih * width + iw) * channels +
                      g * (channels / groups),
                  sizeof(T) * (channels / groups));
            }
          } else {
            // This should be simply padded with zero.
            for (const auto g : c10::irange(groups)) {
              for (int i = 0; i < channels / groups; ++i) {
                data_col_temp
                    [(((g * kernel_h + r) * kernel_w) + s) *
                         (channels / groups) +
                     i] = zero_point;
              }
            }
          }
        } // for each iw
      } // for each ih
      data_col_temp += kernel_h * kernel_w * channels;
      w_pad += stride_w;
    } // for each output pixel
  } // for each image row
}

/**
 * The layout of the result is N T H W G Q R S C/G.
 * Note that groups are pulled out to an outer dimension so that we can use
 * GEMMs efficiently.
 */
template <typename T>
static void Im2Col3DNHWC(
    const int channels,
    const int num_frames,
    const int height,
    const int width,
    const int kernel_t,
    const int kernel_h,
    const int kernel_w,
    const int dilation_t,
    const int dilation_h,
    const int dilation_w,
    const int pad_p, // previous frame
    const int pad_t, // top
    const int pad_l, // left
    const int pad_n, // next frame
    const int pad_b, // bottom
    const int pad_r, // right
    const int stride_t,
    const int stride_h,
    const int stride_w,
    const T* data_im,
    T* data_col,
    CPUContext* /*context*/,
    const int groups,
    const T& zero_point) {
  const int dkernel_t = dilation_t * (kernel_t - 1) + 1;
  const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
  const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  int frame_col = (num_frames + pad_p + pad_n - dkernel_t) / stride_t + 1;
  int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
  int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;

  // Output frames are independent; parallelize over them unless nested.
#ifdef _OPENMP
#pragma omp parallel for if (!omp_in_parallel())
#endif
  for (int t = 0; t < frame_col; ++t) {
    int t_pad = -pad_p + t * stride_t;
    for (const auto h : c10::irange(height_col)) {
      int h_pad = -pad_t + h * stride_h;
      T* data_col_temp = data_col +
          (t * height_col + h) * width_col * kernel_t * kernel_h * kernel_w *
              channels;
      for (const auto w : c10::irange(width_col)) {
        int w_pad = -pad_l + w * stride_w;
        int q = 0;
        for (int it = t_pad; it < t_pad + dkernel_t; it += dilation_t, ++q) {
          int r = 0;
          for (int ih = h_pad; ih < h_pad + dkernel_h; ih += dilation_h, ++r) {
            int s = 0;
            for (int iw = w_pad; iw < w_pad + dkernel_w;
                 iw += dilation_w, ++s) {
              if (it >= 0 && it < num_frames && ih >= 0 && ih < height &&
                  iw >= 0 && iw < width) {
                // Copy one (channels / groups)-wide slice per group.
                for (const auto g : c10::irange(groups)) {
                  memcpy(
                      data_col_temp +
                          (((g * kernel_t + q) * kernel_h + r) * kernel_w +
                           s) *
                              (channels / groups),
                      data_im + ((it * height + ih) * width + iw) * channels +
                          g * (channels / groups),
                      sizeof(T) * (channels / groups));
                }
              } else {
                // This should be simply padded with zero.
                for (const auto g : c10::irange(groups)) {
                  for (int i = 0; i < channels / groups; ++i) {
                    data_col_temp
                        [((((g * kernel_t + q) * kernel_h + r) * kernel_w) +
                          s) *
                             (channels / groups) +
                         i] = zero_point;
                  }
                }
              }
            } // for each iw
          } // for each ih
        } // for each it
        data_col_temp += kernel_t * kernel_h * kernel_w * channels;
      } // for each output pixel
    } // for each image row
  } // for each frame
}

} // namespace math

} // namespace caffe2
blowfish-tables.h
// Refer to https://github.com/karashiiro/blowfish-tables for the README and latest version of this file.

#ifndef MAKE_BLOWFISH_TABLES
#define MAKE_BLOWFISH_TABLES

#include <stddef.h>

// NOTE(review): file-scope identifiers beginning with an underscore are
// reserved for the implementation (C11 7.1.3); kept here for API
// compatibility with existing users of this header. Also note these helpers
// have external linkage, so including this header from more than one
// translation unit will cause multiple-definition link errors.

// Modular exponentiation: returns base^exp (mod mod), up to the deliberate
// skipped reductions noted below.
// https://en.wikipedia.org/wiki/Modular_exponentiation#Pseudocode
// These must be 64-bit unsigned integers, or the base^2 operation will
// overflow for large bases.
unsigned long long _bftPowermod(unsigned long long base, unsigned long long exp, unsigned long long mod)
{
    unsigned long long result = 1;
    base %= mod;
    for (; exp > 0; exp >>= 1)
    {
        if (exp & 1)
        {
            result *= base;
            // This probably doesn't generalize well, but that
            // doesn't matter here since it works and has a huge
            // impact on performance.
            if (result > 0xFFFFFFFF)
            {
                result %= mod;
            }
        }
        base *= base;
        if (base > 0xFFFFFFFF)
        {
            base %= mod;
        }
    }
    // In a correct powermod implementation, we'd modulus here
    // since we skip some modulus operations in the loop.
    // We're only doing this to keep the fractional part of the
    // sum, though, so that's unnecessary in this case because
    // we use fPart later.
    return result;
}

// floor() for the non-negative doubles used here, without pulling in <math.h>.
// https://stackoverflow.com/a/26091248
unsigned int _bftFloor(double x)
{
    unsigned int xi = (unsigned int)x;
    return x < xi ? xi - 1 : xi;
}

// Fractional part of x (valid for the non-negative values used here).
double _bftFPart(double x)
{
    return x - _bftFloor(x);
}

// Partial BBP series sum S_j(n) with the (negligible for our use) infinite
// tail dropped. Based on https://giordano.github.io/blog/2017-11-21-hexadecimal-pi/
double _bftCalcPiSum(size_t n, size_t j)
{
    double sum = 0;
    size_t denominator = j;
    for (size_t k = 0; k <= n; k++)
    {
        sum += (double)_bftPowermod(16, n - k, denominator) / denominator;
        denominator += 8;
    }
    // Completely elide the infinite sum
    return sum;
}

// Returns the n-th fractional hexadecimal digit of pi (0-based).
/* See https://en.wikipedia.org/wiki/Bailey–Borwein–Plouffe_formula#BBP_digit-extraction_algorithm_for_π */
unsigned int _bftCalcPiFractionalDigit(size_t n)
{
    double sum1 = _bftCalcPiSum(n, 1);
    double sum2 = _bftCalcPiSum(n, 4);
    double sum3 = _bftCalcPiSum(n, 5);
    double sum4 = _bftCalcPiSum(n, 6);
    return _bftFloor(16 * _bftFPart(4 * sum1 - 2 * sum2 - sum3 - sum4));
}

// Packs the eight hex digits of pi starting at fractional position n into one
// 32-bit word, most significant nibble first (e.g. n == 0 yields 0x243F6A88,
// the first Blowfish P-array entry).
unsigned int _bftMakeGroup(size_t n)
{
    // BUGFIX: `group` must be zero-initialized. The original left it
    // uninitialized and then OR-ed into it, which is undefined behavior
    // (the OpenMP |-reduction also combines with the *original* value,
    // so the pragma did not save it).
    unsigned int group = 0;
    unsigned int digits[8];

    // Digit extraction dominates the cost for large n, so compute the eight
    // digits in parallel once n is big enough to be worth the fork.
#pragma omp \
    parallel for firstprivate(n) shared(digits) \
    if(n > 18)
    for (size_t i = 0; i < 8; i++)
    {
        digits[i] = _bftCalcPiFractionalDigit(n + i);
    }

    // Shift digit i into its nibble position: digit 0 is the top nibble.
    for (int i = 0; i < 8; i++)
    {
        digits[i] <<= (32 - (i + 1) * 4);
    }

#pragma omp simd reduction(|:group)
    for (int i = 0; i < 8; i++)
    {
        group |= digits[i];
    }

    return group;
}

/** Builds a Blowfish p-array into the provided buffer.
 *  pArray must hold subkeyCount 32-bit words. */
void MakeBlowfishPArray(unsigned int *pArray, size_t subkeyCount)
{
    for (size_t n = 0; n < subkeyCount; n++)
    {
        pArray[n] = _bftMakeGroup(n * 8);
    }
}

/** Builds Blowfish substitution boxes into the provided buffer.
 *  sBoxes must hold boxCount * boxSize words; pSubkeys is the number of
 *  p-array subkeys already consumed, so the S-box digits continue where the
 *  p-array left off. */
void MakeBlowfishSBoxes(unsigned int *sBoxes, size_t boxCount, size_t boxSize, size_t pSubkeys)
{
    size_t subkeyCount = boxCount * boxSize;
    for (size_t n = 0; n < subkeyCount; n++)
    {
        sBoxes[n] = _bftMakeGroup(8 * (pSubkeys + n));
    }
}

#endif /* MAKE_BLOWFISH_TABLES */
3mm.c
/** * 3mm.c: This file was adapted from PolyBench/GPU 1.0 test suite * to run on GPU with OpenMP 4.0 pragmas and OpenCL driver. * * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br> * Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br> * Luís Felipe Mattos <ra107822@students.ic.unicamp.br> */ #include <assert.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <unistd.h> #ifdef _OPENMP #include <omp.h> #endif #include "BenchmarksUtil.h" #define BENCHMARK_NAME "3MM" // define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 /* Problem size. */ #ifdef RUN_POLYBENCH_SIZE #define SIZE 512 #elif RUN_TEST #define SIZE 1100 #elif RUN_BENCHMARK #define SIZE 9600 #else #define SIZE 1000 #endif #define NI SIZE #define NJ SIZE #define NK SIZE #define NL SIZE #define NM SIZE /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_array(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *D) { int i, j; for (i = 0; i < NI; i++) { for (j = 0; j < NK; j++) { A[i * NK + j] = ((DATA_TYPE)i * j) / NI; } } for (i = 0; i < NK; i++) { for (j = 0; j < NJ; j++) { B[i * NJ + j] = ((DATA_TYPE)i * (j + 1)) / NJ; } } for (i = 0; i < NJ; i++) { for (j = 0; j < NM; j++) { C[i * NM + j] = ((DATA_TYPE)i * (j + 3)) / NL; } } for (i = 0; i < NM; i++) { for (j = 0; j < NL; j++) { D[i * NL + j] = ((DATA_TYPE)i * (j + 2)) / NK; } } } int compareResults(DATA_TYPE *G, DATA_TYPE *G_outputFromGpu) { int i, j, fail; fail = 0; for (i = 0; i < NI; i++) { for (j = 0; j < NL; j++) { if (percentDiff(G[i * NL + j], G_outputFromGpu[i * NL + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f " "Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); return fail; } void mm3_cpu(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE 
*E, DATA_TYPE *F, DATA_TYPE *G) { int i, j, k; /* E := A*B */ for (i = 0; i < NI; i++) { for (j = 0; j < NJ; j++) { E[i * NJ + j] = 0; for (k = 0; k < NK; ++k) { E[i * NJ + j] += A[i * NK + k] * B[k * NJ + j]; } } } /* F := C*D */ for (i = 0; i < NJ; i++) { for (j = 0; j < NL; j++) { F[i * NL + j] = 0; for (k = 0; k < NM; ++k) { F[i * NL + j] += C[i * NM + k] * D[k * NL + j]; } } } /* G := E*F */ for (i = 0; i < NI; i++) { for (j = 0; j < NL; j++) { G[i * NL + j] = 0; for (k = 0; k < NJ; ++k) { G[i * NL + j] += E[i * NJ + k] * F[k * NL + j]; } } } } void mm3_OMP(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G) { /* E := A*B */ #pragma omp target teams map(to : A[ : NI *NK], B[ : NK *NJ], C[ : NJ *NM], D[ : NM *NL]) map(from : E[ : NI *NJ], F[ : NJ *NL], G[ : NI *NL]) device(DEVICE_ID) thread_limit(128) { #pragma omp distribute parallel for collapse(2) for (int i = 0; i < NI; i++) { for (int j = 0; j < NJ; j++) { E[i * NJ + j] = 0; for (int k = 0; k < NK; ++k) { E[i * NJ + j] += A[i * NK + k] * B[k * NJ + j]; } } } /* F := C*D */ #pragma omp distribute parallel for collapse(2) for (int i = 0; i < NJ; i++) { for (int j = 0; j < NL; j++) { F[i * NL + j] = 0; for (int k = 0; k < NM; ++k) { F[i * NL + j] += C[i * NM + k] * D[k * NL + j]; } } } /* G := E*F */ #pragma omp distribute parallel for collapse(2) for (int i = 0; i < NI; i++) { for (int j = 0; j < NL; j++) { G[i * NL + j] = 0; for (int k = 0; k < NJ; ++k) { G[i * NL + j] += E[i * NJ + k] * F[k * NL + j]; } } } } } int main(int argc, char **argv) { double t_start, t_end; int fail = 0; DATA_TYPE *A; DATA_TYPE *B; DATA_TYPE *C; DATA_TYPE *D; DATA_TYPE *E; DATA_TYPE *F; DATA_TYPE *G; DATA_TYPE *E_outputFromGpu; DATA_TYPE *F_outputFromGpu; DATA_TYPE *G_outputFromGpu; A = (DATA_TYPE *)malloc(NI * NK * sizeof(DATA_TYPE)); B = (DATA_TYPE *)malloc(NK * NJ * sizeof(DATA_TYPE)); C = (DATA_TYPE *)malloc(NJ * NM * sizeof(DATA_TYPE)); D = (DATA_TYPE *)malloc(NM * NL * 
sizeof(DATA_TYPE)); E = (DATA_TYPE *)malloc(NI * NJ * sizeof(DATA_TYPE)); F = (DATA_TYPE *)malloc(NJ * NL * sizeof(DATA_TYPE)); G = (DATA_TYPE *)malloc(NI * NL * sizeof(DATA_TYPE)); E_outputFromGpu = (DATA_TYPE *)calloc(NI * NJ, sizeof(DATA_TYPE)); F_outputFromGpu = (DATA_TYPE *)calloc(NJ * NL, sizeof(DATA_TYPE)); G_outputFromGpu = (DATA_TYPE *)calloc(NI * NL, sizeof(DATA_TYPE)); /*fprintf(stdout, "<< Linear Algebra: 3 Matrix Multiplications (E=A.B; F=C.D; G=E.F) " "size: %d>>\n", SIZE);*/ printBenchmarkInfo(BENCHMARK_NAME, SIZE); init_array(A, B, C, D); t_start = rtclock(); mm3_OMP(A, B, C, D, E_outputFromGpu, F_outputFromGpu, G_outputFromGpu); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); #ifdef RUN_TEST t_start = rtclock(); mm3_cpu(A, B, C, D, E, F, G); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); fail = compareResults(G, G_outputFromGpu); #endif free(A); free(B); free(C); free(D); free(E); free(F); free(G); free(G_outputFromGpu); return fail; }
GB_unaryop__minv_uint64_int8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_uint64_int8
// op(A') function:  GB_tran__minv_uint64_int8

// C type:   uint64_t
// A type:   int8_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 64)

// source/destination element types
#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 64) ;

// casting
#define GB_CASTING(z, x) \
    uint64_t z = (uint64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise over the anz entries of Ax; each iteration is independent,
// so the loop is statically scheduled across nthreads.
GrB_Info GB_unop__minv_uint64_int8
(
    uint64_t *restrict Cx,
    const int8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop body lives in the shared template GB_unaryop_transpose.c,
// specialized via the GB_* macros defined above.
GrB_Info GB_tran__minv_uint64_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pr63249.c
/* PR c++/63249 */ /* { dg-do compile } */ /* { dg-options "-Wall -W -fopenmp" } */ int foo (int *v, int A, int B) /* { dg-bogus "set but not used" } */ { int r = 0; int a = 2; /* { dg-bogus "set but not used" } */ int b = 4; /* { dg-bogus "set but not used" } */ #pragma omp target map(to: v[a:b]) r |= v[3]; #pragma omp target map(to: v[A:B]) r |= v[3]; return r; }
nauty.c
/*****************************************************************************
*                                                                            *
* Main source file for version 2.2 of nauty.                                 *
*                                                                            *
*   Copyright (1984-2004) Brendan McKay.  All rights reserved.  Permission   *
*   Subject to the waivers and disclaimers in nauty.h.                       *
*                                                                            *
*   CHANGE HISTORY                                                           *
*       10-Nov-87 : final changes for version 1.2                            *
*        5-Dec-87 : renamed to version 1.3 (no changes to this file)         *
*       28-Sep-88 : renamed to version 1.4 (no changes to this file)         *
*       23-Mar-89 : changes for version 1.5 :                                *
*                   - add use of refine1 instead of refine for m==1          *
*                   - changes for new optionblk syntax                       *
*                   - disable tc_level use for digraphs                      *
*                   - interposed doref() interface to refine() so that       *
*                     options.invarproc can be supported                     *
*                   - declared local routines static                         *
*       28-Mar-89 : - implemented mininvarlevel/maxinvarlevel < 0 options    *
*        2-Apr-89 : - added invarproc fields in stats                        *
*        5-Apr-89 : - modified error returns from nauty()                    *
*                   - added error message to ERRFILE                         *
*                   - changed MAKEEMPTY uses to EMPTYSET                     *
*       18-Apr-89 : - added MTOOBIG and CANONGNIL                            *
*        8-May-89 : - changed firstcode[] and canoncode[] to short           *
*       10-Nov-90 : changes for version 1.6 :                                *
*                   - added dummy routine nauty_null (see dreadnaut.c)       *
*        2-Sep-91 : changes for version 1.7 :                                *
*                   - moved MULTIPLY into nauty.h                            *
*       27-Mar-92 : - changed 'n' into 'm' in error message in nauty()       *
*        5-Jun-93 : renamed to version 1.7+ (no changes to this file)        *
*       18-Aug-93 : renamed to version 1.8 (no changes to this file)         *
*       17-Sep-93 : renamed to version 1.9 (no changes to this file)         *
*       13-Jul-96 : changes for version 2.0 :                                *
*                   - added dynamic allocation                               *
*       21-Oct-98 : - made short into shortish for BIGNAUTY as needed        *
*        7-Jan-00 : - allowed n=0                                            *
*                   - added nauty_check() and a call to it                   *
*       12-Feb-00 : - used better method for target cell memory allocation   *
*                   - did a little formating of the code                     *
*       27-May-00 : - fixed error introduced on Feb 12.                      *
*                   - dynamic allocations in nauty() are now deallocated     *
*                     before return if n >= 320.                             *
*       16-Nov-00 : - use function prototypes, change UPROC to void.         *
*                   - added argument to tcellproc(), removed nvector         *
*                   - now use options.dispatch, options.groupopts is gone.   *
*       22-Apr-01 : - Added code for compilation into Magma                  *
*                   - Removed nauty_null() and EXTDEFS                       *
*        2-Oct-01 : - Improved error message for bad dispatch vector         *
*       21-Nov-01 : - use NAUTYREQUIRED in nauty_check()                     *
*       20-Dec-02 : changes for version 2.2:                                 *
*                   - made tcnode0 global                                    *
*                   - added nauty_freedyn()                                  *
*       17-Nov-03 : changed INFINITY to NAUTY_INFINITY                       *
*       14-Sep-04 : extended prototypes even to recursive functions          *
*       16-Oct-04 : disallow NULL dispatch vector                            *
*                                                                            *
*****************************************************************************/

#define ONE_WORD_SETS
#include "nauty.h"

#ifdef NAUTY_IN_MAGMA
#include "cleanup.e"
#define NAUTY_ABORT (-11)
#endif

/* One node per recursion level of the search when MAXN==0; each carries a
   dynamically allocated target-cell set (see the long comment below). */
typedef struct tcnode_struct
{
    struct tcnode_struct *next;
    set *tcellptr;
} tcnode;

/* aproto: header new_nauty_protos.h */

#ifndef NAUTY_IN_MAGMA
#if !MAXN
static int firstpathnode0(int*, int*, int, int, tcnode*);
static int othernode0(int*, int*, int, int, tcnode*);
#else
static int firstpathnode(int*, int*, int, int);
static int othernode(int*, int*, int, int);
#endif
static void firstterminal(int*, register int);
static int processnode(int*, int*, int, int);
static void recover(register int*, register int);
static void writemarker(int, int, int, int, int, int);
#endif

#if MAXM==1
#define M 1
#else
#define M m
#endif

/* call proc only if the pointer is non-NULL */
#define OPTCALL(proc) if (proc != NULL) (*proc)

/* NOTE(review): all working state below is file-scope static and is written
   by nauty(), so this translation unit is not reentrant/thread-safe. */

/* copies of some of the options: */
static boolean getcanon,digraph,writeautoms,domarkers,cartesian;
static int linelength,tc_level,mininvarlevel,maxinvarlevel,invararg;
static void (*usernodeproc)(graph*,int*,int*,int,int,int,int,int,int);
static void (*userautomproc)(int,permutation*,int*,int,int,int);
static void (*userlevelproc)
              (int*,int*,int,int*,statsblk*,int,int,int,int,int,int);
static void (*tcellproc)(graph*,int*,int*,int,int,set*,int*,int*,int,int,
                    int(*)(graph*,int*,int*,int,int,int,int),int,int);
static void (*invarproc)
              (graph*,int*,int*,int,int,int,permutation*,int,boolean,int,int);
static FILE *outfile;
static dispatchvec dispatch;

/* local versions of some of the arguments: */
static int m,n;
static graph *g,*canong;
static int *orbits;
static statsblk *stats;

/* temporary versions of some stats: */
static long invapplics,invsuccesses;
static int invarsuclevel;

/* working variables: <the "bsf leaf" is the leaf which is best guess so
                       far at the canonical leaf>  */
static int gca_first, /* level of greatest common ancestor of current
                         node and first leaf */
    gca_canon,        /* ditto for current node and bsf leaf */
    noncheaplevel,    /* level of greatest ancestor for which cheapautom
                         == FALSE */
    allsamelevel,     /* level of least ancestor of first leaf for
                         which all descendant leaves are known to be
                         equivalent */
    eqlev_first,      /* level to which codes for this node match those
                         for first leaf */
    eqlev_canon,      /* level to which codes for this node match those
                         for the bsf leaf. */
    comp_canon,       /* -1,0,1 according as code at eqlev_canon+1 is
                         <,==,> that for bsf leaf.  Also used for
                         similar purpose during leaf processing */
    samerows,         /* number of rows of canong which are correct for
                         the bsf leaf  BDM:correct description? */
    canonlevel,       /* level of bsf leaf */
    stabvertex,       /* point fixed in ancestor of first leaf at level
                         gca_canon */
    cosetindex;       /* the point being fixed at level gca_first */

static boolean needshortprune;  /* used to flag calls to shortprune */

#if !MAXN
DYNALLSTAT(set,defltwork,defltwork_sz);
DYNALLSTAT(permutation,workperm,workperm_sz);
DYNALLSTAT(set,fixedpts,fixedpts_sz);
DYNALLSTAT(permutation,firstlab,firstlab_sz);
DYNALLSTAT(permutation,canonlab,canonlab_sz);
DYNALLSTAT(short,firstcode,firstcode_sz);
DYNALLSTAT(short,canoncode,canoncode_sz);
DYNALLSTAT(shortish,firsttc,firsttc_sz);
DYNALLSTAT(set,active,active_sz);

/* In the dynamically allocated case (MAXN=0), each level of recursion
   needs one set (tcell) to represent the target cell.  This is
   implemented by using a linked list of tcnode anchored at the root
   of the search tree.  Each node points to its child (if any) and to
   the dynamically allocated tcell.  Apart from the the first node of
   the list, each node always has a tcell good for m up to alloc_m.
   tcnodes and tcells are kept between calls to nauty, except that
   they are freed and reallocated if m gets bigger than alloc_m.  */

static tcnode tcnode0 = {NULL,NULL};
static int alloc_m = 0;

#else
static set defltwork[2*MAXM];   /* workspace in case none provided */
static permutation workperm[MAXN];   /* various scratch uses */
static set fixedpts[MAXM];      /* points which were explicitly fixed
                                   to get current node */
static permutation firstlab[MAXN],   /* label from first leaf */
            canonlab[MAXN];          /* label from bsf leaf */
static short firstcode[MAXN+2],      /* codes for first leaf */
       canoncode[MAXN+2];            /* codes for bsf leaf */
static shortish firsttc[MAXN+2];     /* index of target cell for left path */
static set active[MAXM];        /* used to contain index to cells now
                                   active for refinement purposes */
#endif

static set *workspace,*worktop;  /* first and just-after-last addresses of
                                    work area to hold automorphism data */
static set *fmptr;               /* pointer into workspace */

/*****************************************************************************
*                                                                            *
*  This procedure finds generators for the automorphism group of a          *
*  vertex-coloured graph and optionally finds a canonically labelled        *
*  isomorph.  A description of the data structures can be found in          *
*  nauty.h and in the "nauty User's Guide".  The Guide also gives          *
*  many more details about its use, and implementation notes.               *
*                                                                            *
*  Parameters - <r> means read-only, <w> means write-only, <wr> means both: *
*           g <r>  - the graph                                              *
*     lab,ptn <rw> - used for the partition nest which defines the          *
*                    colouring of g.  The initial colouring will be set by  *
*                    the program, using the same colour for every vertex,   *
*                    if options->defaultptn!=FALSE.  Otherwise, you must    *
*                    set it yourself (see the Guide).  If                   *
*                    options->getcanon!=FALSE, the contents of lab on       *
*                    return give the labelling of g corresponding to        *
*                    canong.  This does not change the initial colouring    *
*                    of g as defined by (lab,ptn), since the labelling is   *
*                    consistent with the colouring.                         *
*     active  <r>  - If this is not NULL and options->defaultptn==FALSE,    *
*                    it is a set indicating the initial set of active       *
*                    colours.  See the Guide for details.                   *
*     orbits  <w>  - On return, orbits[i] contains the number of the        *
*                    least-numbered vertex in the same orbit as i, for      *
*                    i=0,1,...,n-1.                                         *
*    options  <r>  - A list of options.  See nauty.h and/or the Guide       *
*                    for details.                                           *
*      stats  <w>  - A list of statistics produced by the procedure.  See   *
*                    nauty.h and/or the Guide for details.                  *
*  workspace  <w>  - A chunk of memory for working storage.                 *
*   worksize  <r>  - The number of setwords in workspace.  See the Guide    *
*                    for guidance.                                          *
*          m  <r>  - The number of setwords in sets.  This must be at       *
*                    least ceil(n / WORDSIZE) and at most MAXM.             *
*          n  <r>  - The number of vertices.  This must be at least 1 and   *
*                    at most MAXN.                                          *
*     canong  <w>  - The canononically labelled isomorph of g.  This is    *
*                    only produced if options->getcanon!=FALSE, and can be  *
*                    given as NULL otherwise.                               *
*                                                                            *
*  FUNCTIONS CALLED: firstpathnode(),updatecan()                            *
*                                                                            *
*****************************************************************************/

void
nauty(graph *g_arg, int *lab, int *ptn, set *active_arg,
      int *orbits_arg, optionblk *options, statsblk *stats_arg,
      set *ws_arg, int worksize, int m_arg, int n_arg, graph *canong_arg)
{
    register int i;
    int numcells;
#if !MAXN
    tcnode *tcp,*tcq;
#endif

    /* determine dispatch vector */

    if (options->dispatch == NULL)
    {
        fprintf(ERRFILE,">E nauty: null dispatch vector\n");
        fprintf(ERRFILE,"Maybe you need to recompile\n");
        exit(1);
    }
    else
        dispatch = *(options->dispatch);

    /* a user refinement procedure overrides refine; otherwise use the
       word-optimized refine1 when sets fit in one setword (m==1) */
    if (options->userrefproc)
        dispatch.refine = options->userrefproc;
    else if (dispatch.refine1 && m_arg == 1)
        dispatch.refine = dispatch.refine1;

    if (dispatch.refine == NULL || dispatch.updatecan == NULL
            || dispatch.bestcell == NULL || dispatch.cheapautom == NULL)
    {
        fprintf(ERRFILE,">E bad dispatch vector\n");
        exit(1);
    }

    if (options->usertcellproc)
        tcellproc = options->usertcellproc;
    else
        tcellproc = targetcell;

    /* check for excessive sizes: */
#if !MAXN
    if (m_arg > NAUTY_INFINITY/WORDSIZE+1)
    {
        stats_arg->errstatus = MTOOBIG;
        fprintf(ERRFILE,"nauty: need m <= %d, but m=%d\n\n",
                NAUTY_INFINITY/WORDSIZE+1,m_arg);
        return;
    }
    if (n_arg > NAUTY_INFINITY-2 || n_arg > WORDSIZE * m_arg)
    {
        stats_arg->errstatus = NTOOBIG;
        fprintf(ERRFILE,"nauty: need n <= min(%d,%d*m), but n=%d\n\n",
                NAUTY_INFINITY-2,WORDSIZE,n_arg);
        return;
    }
#else
    if (m_arg > MAXM)
    {
        stats_arg->errstatus = MTOOBIG;
        fprintf(ERRFILE,"nauty: need m <= %d\n\n",MAXM);
        return;
    }
    if (n_arg > MAXN || n_arg > WORDSIZE * m_arg)
    {
        stats_arg->errstatus = NTOOBIG;
        fprintf(ERRFILE,"nauty: need n <= min(%d,%d*m)\n\n",MAXM,WORDSIZE);
        return;
    }
#endif

    /* trivial group for the empty graph; fill in stats and return */
    if (n_arg == 0)   /* Special code for Wendy */
    {
        stats_arg->grpsize1 = 1.0;
        stats_arg->grpsize2 = 0;
        stats_arg->numorbits = 0;
        stats_arg->numgenerators = 0;
        stats_arg->errstatus = 0;
        stats_arg->numnodes = 1;
        stats_arg->numbadleaves = 0;
        stats_arg->maxlevel = 1;
        stats_arg->tctotal = 0;
        stats_arg->canupdates = (options->getcanon != 0);
        stats_arg->invapplics = 0;
        stats_arg->invsuccesses = 0;
        stats_arg->invarsuclevel = 0;
        return;
    }

    /* take copies of some args, and options: */
    m = m_arg;
    n = n_arg;

    nautil_check(WORDSIZE,m,n,NAUTYVERSIONID);
    OPTCALL(dispatch.check)(WORDSIZE,m,n,NAUTYVERSIONID);

#if !MAXN
    DYNALLOC1(set,defltwork,defltwork_sz,2*m,"nauty");
    DYNALLOC1(set,fixedpts,fixedpts_sz,m,"nauty");
    DYNALLOC1(set,active,active_sz,m,"nauty");
    DYNALLOC1(permutation,workperm,workperm_sz,n,"nauty");
    DYNALLOC1(permutation,firstlab,firstlab_sz,n,"nauty");
    DYNALLOC1(permutation,canonlab,canonlab_sz,n,"nauty");
    DYNALLOC1(short,firstcode,firstcode_sz,n+2,"nauty");
    DYNALLOC1(short,canoncode,canoncode_sz,n+2,"nauty");
    DYNALLOC1(shortish,firsttc,firsttc_sz,n+2,"nauty");
    /* cached tcells are only valid up to alloc_m; if m grew, free the
       whole chain so tcells are reallocated at the new size */
    if (m > alloc_m)
    {
        tcp = tcnode0.next;
        while (tcp != NULL)
        {
            tcq = tcp->next;
            FREES(tcp->tcellptr);
            FREES(tcp);
            tcp = tcq;
        }
        alloc_m = m;
        tcnode0.next = NULL;
    }
#endif

    g = g_arg;
    orbits = orbits_arg;
    stats = stats_arg;

    getcanon = options->getcanon;
    digraph = options->digraph;
    writeautoms = options->writeautoms;
    domarkers = options->writemarkers;
    cartesian = options->cartesian;
    linelength = options->linelength;
    if (digraph) tc_level = 0;
    else         tc_level = options->tc_level;
    outfile = (options->outfile == NULL ? stdout : options->outfile);
    usernodeproc = options->usernodeproc;
    userautomproc = options->userautomproc;
    userlevelproc = options->userlevelproc;
    invarproc = options->invarproc;

    /* negative invarlevel options mean "count from the canonical side"
       and only apply when a canonical labelling is requested */
    if (options->mininvarlevel < 0 && options->getcanon)
        mininvarlevel = -options->mininvarlevel;
    else
        mininvarlevel = options->mininvarlevel;
    if (options->maxinvarlevel < 0 && options->getcanon)
        maxinvarlevel = -options->maxinvarlevel;
    else
        maxinvarlevel = options->maxinvarlevel;
    invararg = options->invararg;

    if (getcanon)
    {
        if (canong_arg == NULL)
        {
            stats_arg->errstatus = CANONGNIL;
            fprintf(ERRFILE,
                  "nauty: canong=NULL but options.getcanon=TRUE\n\n");
            return;
        }
        else
            canong = canong_arg;
    }

    /* initialize everything: */
    if (options->defaultptn)
    {
        for (i = 0; i < n; ++i)   /* give all verts same colour */
        {
            lab[i] = i;
            ptn[i] = NAUTY_INFINITY;
        }
        ptn[n-1] = 0;
        EMPTYSET(active,m);
        ADDELEMENT(active,0);
        numcells = 1;
    }
    else
    {
        ptn[n-1] = 0;
        numcells = 0;
        for (i = 0; i < n; ++i)
            if (ptn[i] != 0) ptn[i] = NAUTY_INFINITY;
            else             ++numcells;
        if (active_arg == NULL)
        {
            /* no active set supplied: mark the first vertex of each cell */
            EMPTYSET(active,m);
            for (i = 0; i < n; ++i)
            {
                ADDELEMENT(active,i);
                while (ptn[i]) ++i;
            }
        }
        else
            for (i = 0; i < M; ++i) active[i] = active_arg[i];
    }

    for (i = 0; i < n; ++i) orbits[i] = i;
    stats->grpsize1 = 1.0;
    stats->grpsize2 = 0;
    stats->numgenerators = 0;
    stats->numnodes = 0;
    stats->numbadleaves = 0;
    stats->tctotal = 0;
    stats->canupdates = 0;
    stats->numorbits = n;
    EMPTYSET(fixedpts,m);
    noncheaplevel = 1;
    eqlev_canon = -1;       /* needed even if !getcanon */

    /* fall back on the static default workspace if the caller's is too
       small; worktop is rounded down to a multiple of 2*m setwords */
    if (worksize >= 2 * m)
        workspace = ws_arg;
    else
    {
        workspace = defltwork;
        worksize = 2 * m;
    }
    worktop = workspace + (worksize - worksize % (2 * m));
    fmptr = workspace;

    /* here goes: */
    stats->errstatus = 0;
    needshortprune = FALSE;
    invarsuclevel = NAUTY_INFINITY;
    invapplics = invsuccesses = 0;

#ifdef NAUTY_IN_MAGMA
    int retval =
#endif
#if !MAXN
    firstpathnode0(lab,ptn,1,numcells,&tcnode0);
#else
    firstpathnode(lab,ptn,1,numcells);
#endif

#ifdef NAUTY_IN_MAGMA
    if (retval != NAUTY_ABORT)
#endif
    {
        if (getcanon)
        {
            (*dispatch.updatecan)(g,canong,canonlab,samerows,M,n);
            for (i = 0; i < n; ++i) lab[i] = canonlab[i];
        }
        stats->invarsuclevel =
             (invarsuclevel == NAUTY_INFINITY ? 0 : invarsuclevel);
        stats->invapplics = invapplics;
        stats->invsuccesses = invsuccesses;
    }

#if !MAXN
#ifndef NAUTY_IN_MAGMA
    if (n >= 320)
#endif
    {
        nautil_freedyn();
        OPTCALL(dispatch.freedyn)();
        nauty_freedyn();
    }
#endif
}

/*****************************************************************************
*                                                                            *
*  firstpathnode(lab,ptn,level,numcells) produces a node on the leftmost     *
*  path down the tree.  The parameters describe the level and the current   *
*  colour partition.  The set of active cells is taken from the global set  *
*  'active'.  If the refined partition is not discrete, the leftmost child  *
*  is produced by calling firstpathnode, and the other children by calling  *
*  othernode.                                                               *
*  For MAXN=0 there is an extra parameter: the address of the parent tcell  *
*  structure.                                                               *
*  The value returned is the level to return to.
* * * * FUNCTIONS CALLED: (*usernodeproc)(),doref(),cheapautom(), * * firstterminal(),nextelement(),breakout(), * * firstpathnode(),othernode(),recover(),writestats(), * * (*userlevelproc)(),(*tcellproc)(),shortprune() * * * *****************************************************************************/ static int #if !MAXN firstpathnode0(int *lab, int *ptn, int level, int numcells, tcnode *tcnode_parent) #else firstpathnode(int *lab, int *ptn, int level, int numcells) #endif { register int tv; int tv1,index,rtnlevel,tcellsize,tc,childcount,qinvar,refcode; #if !MAXN set *tcell; tcnode *tcnode_this; tcnode_this = tcnode_parent->next; if (tcnode_this == NULL) { if ((tcnode_this = (tcnode*)ALLOCS(1,sizeof(tcnode))) == NULL || (tcnode_this->tcellptr = (set*)ALLOCS(alloc_m,sizeof(set))) == NULL) alloc_error("tcell"); tcnode_parent->next = tcnode_this; tcnode_this->next = NULL; //NOLINT } tcell = tcnode_this->tcellptr; #else set tcell[MAXM]; #endif ++stats->numnodes; /* refine partition : */ doref(g,lab,ptn,level,&numcells,&qinvar,workperm, active,&refcode,dispatch.refine,invarproc, mininvarlevel,maxinvarlevel,invararg,digraph,M,n); firstcode[level] = (short)refcode; if (qinvar > 0) { ++invapplics; if (qinvar == 2) { ++invsuccesses; if (mininvarlevel < 0) mininvarlevel = level; if (maxinvarlevel < 0) maxinvarlevel = level; if (level < invarsuclevel) invarsuclevel = level; } } tc = -1; if (numcells != n) { /* locate new target cell, setting tc to its position in lab, tcell to its contents, and tcellsize to its size: */ (*tcellproc)(g,lab,ptn,level,numcells,tcell,&tcellsize, &tc,tc_level,-1,dispatch.bestcell,M,n); stats->tctotal += tcellsize; } firsttc[level] = tc; /* optionally call user-defined node examination procedure: */ OPTCALL(usernodeproc) (g,lab,ptn,level,numcells,tc,(int)firstcode[level],M,n); if (numcells == n) /* found first leaf? 
*/ { firstterminal(lab,level); OPTCALL(userlevelproc)(lab,ptn,level,orbits,stats,0,1,1,n,0,n); return level-1; } if (noncheaplevel >= level && !(*dispatch.cheapautom)(ptn,level,digraph,n)) noncheaplevel = level + 1; /* use the elements of the target cell to produce the children: */ index = 0; childcount=0; for (tv1 = tv = nextelement(tcell,M,-1); tv >= 0; tv = nextelement(tcell,M,tv)) { if (orbits[tv] == tv) /* ie, not equiv to previous child */ { breakout(lab,ptn,level+1,tc,tv,active,M); ADDELEMENT(fixedpts,tv); cosetindex = tv; if (tv == tv1) { #if !MAXN rtnlevel = firstpathnode0(lab,ptn,level+1,numcells+1, tcnode_this); #else rtnlevel = firstpathnode(lab,ptn,level+1,numcells+1); #endif childcount = 1; gca_first = level; stabvertex = tv1; } else { #if !MAXN rtnlevel = othernode0(lab,ptn,level+1,numcells+1, tcnode_this); #else rtnlevel = othernode(lab,ptn,level+1,numcells+1); #endif ++childcount; } DELELEMENT(fixedpts,tv); if (rtnlevel < level) return rtnlevel; if (needshortprune) { needshortprune = FALSE; shortprune(tcell,fmptr-M,M); } recover(ptn,level); } if (orbits[tv] == tv1) /* ie, in same orbit as tv1 */ ++index; } MULTIPLY(stats->grpsize1,stats->grpsize2,index); if (tcellsize == index && allsamelevel == level + 1) //NOLINT --allsamelevel; if (domarkers) writemarker(level,tv1,index,tcellsize,stats->numorbits,numcells); OPTCALL(userlevelproc)(lab,ptn,level,orbits,stats,tv1,index,tcellsize, numcells,childcount,n); return level-1; } /***************************************************************************** * * * othernode(lab,ptn,level,numcells) produces a node other than an ancestor * * of the first leaf. The parameters describe the level and the colour * * partition. The list of active cells is found in the global set 'active'. * * The value returned is the level to return to. 
* * * * FUNCTIONS CALLED: (*usernodeproc)(),doref(),refine(),recover(), * * processnode(),cheapautom(),(*tcellproc)(),shortprune(), * * nextelement(),breakout(),othernode(),longprune() * * * *****************************************************************************/ static int #if !MAXN othernode0(int *lab, int *ptn, int level, int numcells, tcnode *tcnode_parent) #else othernode(int *lab, int *ptn, int level, int numcells) #endif { register int tv; int tv1,refcode,rtnlevel,tcellsize,tc,qinvar; short code; #if !MAXN set *tcell; tcnode *tcnode_this; tcnode_this = tcnode_parent->next; if (tcnode_this == NULL) { if ((tcnode_this = (tcnode*)ALLOCS(1,sizeof(tcnode))) == NULL || (tcnode_this->tcellptr = (set*)ALLOCS(alloc_m,sizeof(set))) == NULL) alloc_error("tcell"); tcnode_parent->next = tcnode_this; tcnode_this->next = NULL; //NOLINT } tcell = tcnode_this->tcellptr; #else set tcell[MAXM]; #endif #ifdef NAUTY_IN_MAGMA if (main_seen_interrupt) return NAUTY_ABORT; #endif ++stats->numnodes; /* refine partition : */ doref(g,lab,ptn,level,&numcells,&qinvar,workperm,active, &refcode,dispatch.refine,invarproc,mininvarlevel,maxinvarlevel, invararg,digraph,M,n); code = (short)refcode; if (qinvar > 0) { ++invapplics; if (qinvar == 2) { ++invsuccesses; if (level < invarsuclevel) invarsuclevel = level; } } if (eqlev_first == level - 1 && code == firstcode[level]) eqlev_first = level; if (getcanon) { if (eqlev_canon == level - 1) { if (code < canoncode[level]) comp_canon = -1; else if (code > canoncode[level]) comp_canon = 1; else { comp_canon = 0; eqlev_canon = level; } } if (comp_canon > 0) canoncode[level] = code; } tc = -1; /* If children will be required, find new target cell and set tc to its position in lab, tcell to its contents, and tcellsize to its size: */ if (numcells < n && (eqlev_first == level || (getcanon && comp_canon >= 0))) { if (!getcanon || comp_canon < 0) { (*tcellproc)(g,lab,ptn,level,numcells,tcell,&tcellsize, 
&tc,tc_level,firsttc[level],dispatch.bestcell,M,n); if (tc != firsttc[level]) eqlev_first = level - 1; } else (*tcellproc)(g,lab,ptn,level,numcells,tcell,&tcellsize, &tc,tc_level,-1,dispatch.bestcell,M,n); stats->tctotal += tcellsize; } /* optionally call user-defined node examination procedure: */ OPTCALL(usernodeproc)(g,lab,ptn,level,numcells,tc,(int)code,M,n); /* call processnode to classify the type of this node: */ rtnlevel = processnode(lab,ptn,level,numcells); if (rtnlevel < level) /* keep returning if necessary */ return rtnlevel; if (needshortprune) { needshortprune = FALSE; shortprune(tcell,fmptr-M,M); } if (!(*dispatch.cheapautom)(ptn,level,digraph,n)) noncheaplevel = level + 1; /* use the elements of the target cell to produce the children: */ for (tv1 = tv = nextelement(tcell,M,-1); tv >= 0; tv = nextelement(tcell,M,tv)) { breakout(lab,ptn,level+1,tc,tv,active,M); ADDELEMENT(fixedpts,tv); #if !MAXN rtnlevel = othernode0(lab,ptn,level+1,numcells+1,tcnode_this); #else rtnlevel = othernode(lab,ptn,level+1,numcells+1); #endif DELELEMENT(fixedpts,tv); if (rtnlevel < level) return rtnlevel; /* use stored automorphism data to prune target cell: */ if (needshortprune) { needshortprune = FALSE; shortprune(tcell,fmptr-M,M); } if (tv == tv1) longprune(tcell,fixedpts,workspace,fmptr,M); recover(ptn,level); } return level-1; } /***************************************************************************** * * * Process the first leaf of the tree. 
 *                                                                           *
 *  FUNCTIONS CALLED: NONE                                                   *
 *                                                                           *
 *****************************************************************************/

/* Record the first (leftmost) leaf of the search tree.  Saves the leaf's
 * labelling in firstlab[] (the reference point for later automorphism
 * tests) and, if a canonical form is wanted, also initialises the
 * best-so-far (canonical) leaf state from this leaf. */
static void
firstterminal(int *lab, int level)
{
    register int i;

    stats->maxlevel = level;
    gca_first = allsamelevel = eqlev_first = level;
    firstcode[level+1] = 077777;   /* sentinel code, larger than any real refinement code */
    firsttc[level+1] = -1;         /* sentinel: no target cell below a leaf */

    /* remember the first leaf's labelling for later comparisons */
    for (i = 0; i < n; ++i) firstlab[i] = lab[i];

    if (getcanon)
    {
        /* the first leaf also becomes the initial best-so-far leaf */
        canonlevel = eqlev_canon = gca_canon = level;
        comp_canon = 0;
        samerows = 0;
        for (i = 0; i < n; ++i) canonlab[i] = lab[i];
        for (i = 0; i <= level; ++i) canoncode[i] = firstcode[i];
        canoncode[level+1] = 077777;
        stats->canupdates = 1;
    }
}

/*****************************************************************************
 *                                                                           *
 *  Process a node other than the first leaf or its ancestors.  It is first  *
 *  classified into one of five types and then action is taken appropriate   *
 *  to that type.  The types are                                             *
 *                                                                           *
 *  0:  Nothing unusual.  This is just a node internal to the tree whose     *
 *      children need to be generated sometime.                              *
 *  1:  This is a leaf equivalent to the first leaf.  The mapping from       *
 *      firstlab to lab is thus an automorphism.  After processing the       *
 *      automorphism, we can return all the way to the closest invocation    *
 *      of firstpathnode.                                                    *
 *  2:  This is a leaf equivalent to the bsf leaf.  Again, we have found an  *
 *      automorphism, but it may or may not be as useful as one from a       *
 *      type-1 node.  Return as far up the tree as possible.                 *
 *  3:  This is a new bsf node, provably better than the previous bsf node.  *
 *      After updating canonlab etc., treat it the same as type 4.           *
 *  4:  This is a leaf for which we can prove that no descendant is          *
 *      equivalent to the first or bsf leaf or better than the bsf leaf.     *
 *      Return up the tree as far as possible, but this may only be by       *
 *      one level.                                                           *
 *                                                                           *
 *  Types 2 and 3 can't occur if getcanon==FALSE.                            *
 *  The value returned is the level in the tree to return to, which can be   *
 *  anywhere up to the closest invocation of firstpathnode.                  *
 *                                                                           *
 *  FUNCTIONS CALLED: isautom(),updatecan(),testcanlab(),fmperm(),           *
 *                    writeperm(),(*userautomproc)(),orbjoin(),              *
 *                    shortprune(),fmptn()                                   *
 *                                                                           *
 *****************************************************************************/

static int
processnode(int *lab, int *ptn, int level, int numcells)
{
    register int i,code,save,newlevel;
    boolean ispruneok;
    int sr;

    /* --- classify the node into one of the five types described above --- */
    code = 0;
    if (eqlev_first != level && (!getcanon || comp_canon < 0))
        code = 4;                       /* provably useless subtree */
    else if (numcells == n)             /* discrete partition: a leaf */
    {
        if (eqlev_first == level)
        {
#ifdef _OPENMP
#pragma omp parallel for
#endif
            /* workperm := mapping firstlab -> lab */
            for (i = 0; i < n; ++i) workperm[firstlab[i]] = lab[i];

            if (gca_first >= noncheaplevel ||
                (*dispatch.isautom)(g,workperm,digraph,M,n))
                code = 1;               /* automorphism with the first leaf */
        }
        if (code == 0)
        {
            if (getcanon)
            {
                sr = 0;
                if (comp_canon == 0)
                {
                    if (level < canonlevel)
                        comp_canon = 1;
                    else
                    {
                        (*dispatch.updatecan)
                            (g,canong,canonlab,samerows,M,n);
                        samerows = n;
                        comp_canon
                            = (*dispatch.testcanlab)(g,canong,lab,&sr,M,n);
                    }
                }
                if (comp_canon == 0)
                {
#ifdef _OPENMP
#pragma omp parallel for
#endif
                    /* workperm := mapping canonlab -> lab */
                    for (i = 0; i < n; ++i) workperm[canonlab[i]] = lab[i];
                    code = 2;           /* automorphism with the bsf leaf */
                }
                else if (comp_canon > 0)
                    code = 3;           /* new best-so-far leaf */
                else
                    code = 4;
            }
            else
                code = 4;
        }
    }

    if (code != 0 && level > stats->maxlevel) stats->maxlevel = level;

    switch (code)
    {
    case 0:                 /* nothing unusual noticed */
        return level;

    case 1:                 /* lab is equivalent to firstlab */
        if (fmptr == worktop) fmptr -= 2 * M;
        fmperm(workperm,fmptr,fmptr+M,M,n);
        fmptr += 2 * M;
        if (writeautoms)
            writeperm(outfile,workperm,cartesian,linelength,n);
        stats->numorbits = orbjoin(orbits,workperm,n);
        ++stats->numgenerators;
        OPTCALL(userautomproc)(stats->numgenerators,workperm,orbits,
                               stats->numorbits,stabvertex,n);
        return gca_first;

    case 2:                 /* lab is equivalent to canonlab */
        if (fmptr == worktop) fmptr -= 2 * M;
        fmperm(workperm,fmptr,fmptr+M,M,n);
        fmptr += 2 * M;
        save = stats->numorbits;
        stats->numorbits = orbjoin(orbits,workperm,n);
        if (stats->numorbits == save)
        {
            /* the automorphism gave no new orbit information */
            if (gca_canon != gca_first) needshortprune = TRUE;
            return gca_canon;
        }
        if (writeautoms)
            writeperm(outfile,workperm,cartesian,linelength,n);
        ++stats->numgenerators;
        OPTCALL(userautomproc)(stats->numgenerators,workperm,orbits,
                               stats->numorbits,stabvertex,n);
        if (orbits[cosetindex] < cosetindex)
            return gca_first;
        if (gca_canon != gca_first)
            needshortprune = TRUE;
        return gca_canon;

    case 3:                 /* lab is better than canonlab */
        ++stats->canupdates;
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (i = 0; i < n; ++i) canonlab[i] = lab[i];
        canonlevel = eqlev_canon = gca_canon = level;
        comp_canon = 0;
        canoncode[level+1] = 077777;
        samerows = sr;
        break;

    case 4:                 /* non-automorphism terminal node */
        ++stats->numbadleaves;
        break;
    }        /* end of switch statement */

    /* only cases 3 and 4 get this far: */
    if (level != noncheaplevel)
    {
        ispruneok = TRUE;
        if (fmptr == worktop) fmptr -= 2 * M;
        fmptn(lab,ptn,noncheaplevel,fmptr,fmptr+M,M,n);
        fmptr += 2 * M;
    }
    else
        ispruneok = FALSE;

    save = (allsamelevel > eqlev_canon ? allsamelevel-1 : eqlev_canon);
    newlevel = (noncheaplevel <= save ? noncheaplevel-1 : save);

    if (ispruneok && newlevel != gca_first) needshortprune = TRUE;
    return newlevel;
}

/*****************************************************************************
 *                                                                           *
 *  Recover the partition nest at level 'level' and update various other     *
 *  parameters.                                                              *
 *                                                                           *
 *  FUNCTIONS CALLED: NONE                                                   *
 *                                                                           *
 *****************************************************************************/

static void
recover(int *ptn, int level)
{
    register int i;

    /* merge all cells that were split below 'level' back together */
    for (i = 0; i < n; ++i)
        if (ptn[i] > level) ptn[i] = NAUTY_INFINITY;

    if (level < noncheaplevel) noncheaplevel = level + 1;
    if (level < eqlev_first) eqlev_first = level;

    if (getcanon)
    {
        if (level < gca_canon) gca_canon = level;
        if (level <= eqlev_canon)
        {
            eqlev_canon = level;
            comp_canon = 0;
        }
    }
}

/*****************************************************************************
 *                                                                           *
 *  Write statistics concerning an ancestor of the first leaf.
 *                                                                           *
 *  level = its level                                                        *
 *  tv = the vertex fixed to get the first child = the smallest-numbered     *
 *                      vertex in the target cell                            *
 *  cellsize = the size of the target cell                                   *
 *  index = the number of vertices in the target cell which were equivalent  *
 *                      to tv = the index of the stabiliser of tv in the     *
 *                      group fixing the colour partition at this level      *
 *                                                                           *
 *  numorbits = the number of orbits of the group generated by all the      *
 *                      automorphisms so far discovered                      *
 *                                                                           *
 *  numcells = the total number of cells in the equitable partition at this  *
 *                      level                                                *
 *                                                                           *
 *  FUNCTIONS CALLED: itos(),putstring()                                     *
 *                                                                           *
 *****************************************************************************/

static void
writemarker(int level, int tv, int index, int tcellsize,
            int numorbits, int numcells)
{
    char s[30];

/* helpers local to this routine: write an int / a string to outfile */
#define PUTINT(i) itos(i,s); putstring(outfile,s)
#define PUTSTR(x) putstring(outfile,x)

    PUTSTR("level ");
    PUTINT(level);
    PUTSTR(": ");
    if (numcells != numorbits)
    {
        PUTINT(numcells);
        PUTSTR(" cell");
        if (numcells == 1) PUTSTR("; ");
        else               PUTSTR("s; ");
    }
    PUTINT(numorbits);
    PUTSTR(" orbit");
    if (numorbits == 1) PUTSTR("; ");
    else                PUTSTR("s; ");
    PUTINT(tv+labelorg);            /* vertex number shown in user labelling */
    PUTSTR(" fixed; index ");
    PUTINT(index);
    if (tcellsize != index)
    {
        PUTSTR("/");
        PUTINT(tcellsize);
    }
    PUTSTR("\n");
}

/*****************************************************************************
 *                                                                           *
 *  nauty_check() checks that this file is compiled compatibly with the      *
 *  given parameters.   If not, call exit(1).                                *
 *                                                                           *
 *****************************************************************************/

void
nauty_check(int wordsize, int m, int n, int version)
{
    if (wordsize != WORDSIZE)
    {
        fprintf(ERRFILE,"Error: WORDSIZE mismatch in nauty.c\n");
        exit(1);
    }

#if MAXN
    if (m > MAXM)
    {
        fprintf(ERRFILE,"Error: MAXM inadequate in nauty.c\n");
        exit(1);
    }

    if (n > MAXN)
    {
        fprintf(ERRFILE,"Error: MAXN inadequate in nauty.c\n");
        exit(1);
    }
#endif

#ifdef BIGNAUTY
    /* odd version ids mark BIGNAUTY builds; reject a non-BIGNAUTY caller */
    if ((version & 1) == 0)
    {
        fprintf(ERRFILE,"Error: BIGNAUTY mismatch in nauty.c\n");
        exit(1);
    }
#else
    if ((version & 1) == 1)
    {
        fprintf(ERRFILE,"Error: BIGNAUTY mismatch in nauty.c\n");
        exit(1);
    }
#endif

    if (version < NAUTYREQUIRED)
    {
        fprintf(ERRFILE,"Error: nauty.c version mismatch\n");
        exit(1);
    }
}

/*****************************************************************************
 *                                                                           *
 *  nauty_freedyn() frees all the dynamic memory used in this module.        *
 *                                                                           *
 *****************************************************************************/

void
nauty_freedyn(void)
{
#if !MAXN
    tcnode *tcp,*tcq;

    /* release the linked list of target-cell buffers */
    tcp = tcnode0.next;
    while (tcp != NULL)
    {
        tcq = tcp->next;
        FREES(tcp->tcellptr);
        FREES(tcp);
        tcp = tcq;
    }
    alloc_m = 0;
    tcnode0.next = NULL;
    DYNFREE(firsttc,firsttc_sz);
    DYNFREE(canoncode,canoncode_sz);
    DYNFREE(firstcode,firstcode_sz);
    DYNFREE(workperm,workperm_sz);
    DYNFREE(canonlab,canonlab_sz);
    DYNFREE(firstlab,firstlab_sz);
    DYNFREE(defltwork,defltwork_sz);
    DYNFREE(fixedpts,fixedpts_sz);
    DYNFREE(active,active_sz);
#endif
}
matrix.c
#include <omp.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include "matrix.h" static inline matrix_t *matrix_init(const size_t rows, const size_t cols) { matrix_t *m = aligned_alloc(ALIGNMENT, sizeof(matrix_t)); if (!m) { return NULL; } m->rows = rows; m->cols = cols; m->size = rows * cols; m->data = (f64*)(aligned_alloc(ALIGNMENT, rows * cols * sizeof(f64))); if (!m->data) { return NULL; } return m; } matrix_t *matrix_rand_init(const size_t rows, const size_t cols) { matrix_t *m = matrix_init(rows, cols); if (!m) { return NULL; } // seed with `getpid()` rather than `time()` srand(getpid()); for (size_t i = 0; i < m->size; i++) { f64 value = ((f64)(rand() / (f64)(RAND_MAX)) * m->size); u8 sign = rand() % 2; // create random negative values if (sign) { m->data[i] = value; } else { m->data[i] = -value; } } return m; } void matrix_free(matrix_t* m) { if (!m) { printf("error: matrix is already null\n"); return; } free(m->data); free(m); } i32 matrix_write(matrix_t *m, const char *filename) { if (!m || !filename) { printf("error: invalid function arguments (matrix or filename is null)\n"); exit(EXIT_FAILURE); } // open file in write binary mode FILE *fp = fopen(filename, "wb"); if (!fp) { printf("error: failed to open file %s\n", filename); exit(EXIT_FAILURE); } // write matrix dimensions to file fprintf(fp, "%lu %lu\n", m->rows, m->cols); for (size_t i = 0; i < m->size; i++) { // write values to file fprintf(fp, "%lf ", m->data[i]); } fclose(fp); return 1; } matrix_t *matrix_read(const char *filename) { if (!filename) { printf("error: invalid function argument (filename is null)\n"); exit(EXIT_FAILURE); } f64 value = 0.0; size_t rows = 0, cols = 0; // open file in read binary mode FILE *fp = fopen(filename, "rb"); if (!fp) { printf("error: failed to open file %s\n", filename); exit(EXIT_FAILURE); } // read matrix dimensions from file fscanf(fp, "%lu %lu", &rows, &cols); if (!rows || !cols) { printf("error: failed to read from file 
%s\n", filename); exit(EXIT_FAILURE); } matrix_t *m = matrix_init(rows, cols); if (!m) { return NULL; } for (size_t i = 0; i < m->size; i++) { // read values from file fscanf(fp, "%lf ", &value); m->data[i] = value; } fclose(fp); return m; } inline matrix_t *matrix_multiply(matrix_t *a, matrix_t *b) { if (!a || !b) { printf("error: invalid function arguments (one or both matrices are null)\n"); exit(EXIT_FAILURE); } if (a->cols != b->rows) { printf("error: cannot multiply matrices (A.rows != B.cols)\n"); exit(EXIT_FAILURE); } matrix_t *c = matrix_init(a->rows, b->cols); if (!c) { return NULL; } #pragma omp parallel for for (size_t i = 0; i < a->rows; i++) { for (size_t j = 0; j < b->rows; j++) { float loc = a->data[i * a->cols + j]; for (size_t k = 0; k < b->cols; k++) { c->data[i * c->cols + k] += loc * b->data[j * b->cols + k]; } } } return c; } inline f64 matrix_reduction(matrix_t *m) { if (!m) { printf("error: invalid function argument (matrix is null)\n"); exit(EXIT_FAILURE); } f64 acc = 0.0; for (size_t i = 0; i < m->size; i++) { acc += m->data[i]; } return acc; } inline i32 matrix_scale(matrix_t *a, const f64 c) { if (!a) { printf("error: invalid function argument (matrix is null)\n"); exit(EXIT_FAILURE); } for (size_t i = 0; i < a->size; i++) { a->data[i] *= c; } return 1; }
GB_unaryop__abs_uint8_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_uint8_uint32
// op(A') function:  GB_tran__abs_uint8_uint32

// C type:   uint8_t
// A type:   uint32_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = aij

// NOTE: ABS is the identity for unsigned inputs, so the whole operation
// reduces to the uint32_t -> uint8_t cast (which truncates mod 256).

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x)   \
    z = x ;

// casting
#define GB_CASTING(z, x)   \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT8 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__abs_uint8_uint32
(
    uint8_t *restrict Cx,       // output array, anz entries
    const uint32_t *restrict Ax,    // input array, anz entries
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // embarrassingly parallel: each entry is cast independently
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__abs_uint8_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template is specialised by the macros above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 4; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,2);t1++) { lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4)); ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(1,ceild(16*t2-Nz+9,4)),2*t1+1),4*t1-4*t2+2);t3<=min(min(min(floord(4*Nt+Ny-9,4),floord(8*t1+Ny+7,4)),floord(16*t2+Ny+3,4)),floord(16*t1-16*t2+Nz+Ny+5,4));t3++) { for (t4=max(max(max(0,ceild(t1-127,128)),ceild(16*t2-Nz-1011,1024)),ceild(4*t3-Ny-1011,1024));t4<=min(min(min(min(floord(4*Nt+Nx-9,1024),floord(8*t1+Nx+7,1024)),floord(16*t2+Nx+3,1024)),floord(4*t3+Nx-9,1024)),floord(16*t1-16*t2+Nz+Nx+5,1024));t4++) { for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(1024*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),t3-1),256*t4+254);t5++) { for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) { lbv=max(1024*t4,4*t5+4); ubv=min(1024*t4+1023,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + 
(coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); 
free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
convolution_winograd_transform_pack16.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Winograd input transform for F(6x6, 3x3): each 8x8 input tile (stride 6, 2-pixel
// overlap) is multiplied by B^T on both sides and scattered into bottom_blob_tm.
// Data layout is pack16: every pixel is 16 consecutive floats (one __m512 lane set).
// Output layout: for each channel q, 64 transform positions, each holding `tiles`
// contiguous 16-float vectors (hence the `tiles * 16 * k` strides below).
static void conv3x3s1_winograd63_transform_input_pack16_avx512(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    // 6x6 output tiles per row/column; the 8x8 input tiles overlap by 2 pixels.
    const int w_tiles = (w - 2) / 6;
    const int h_tiles = (h - 2) / 6;
    const int tiles = w_tiles * h_tiles;

    // const float itm[8][8] = {
    //     {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
    //
    //     {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
    //     {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
    //     {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
    //     {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
    //
    //     {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
    // };

    // Factored form of the matrix above (shared subexpressions reused):
    // 0 = r00 - r06 + (r04 - r02) * 5.25
    // 7 = r07 - r01 + (r03 - r05) * 5.25

    // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
    // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)

    // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)

    // reuse r04 * 1.25
    // reuse r03 * 2.5
    // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
    // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // 64-byte alignment so _mm512_load_ps/_mm512_store_ps on tmp are legal.
#ifdef _MSC_VER
        __declspec(align(64))
#else
        __attribute__((aligned(64)))
#endif
        float tmp[8][8][16];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* r0 = img0.row(i * 6) + (j * 6) * 16;

                // Pass 1: transform the 8 rows of the tile; note the transposed
                // store tmp[k][m] so pass 2 walks columns contiguously.
                for (int m = 0; m < 8; m++)
                {
                    __m512 _r00 = _mm512_load_ps(r0);
                    __m512 _r01 = _mm512_load_ps(r0 + 16);
                    __m512 _r02 = _mm512_load_ps(r0 + 16 * 2);
                    __m512 _r03 = _mm512_load_ps(r0 + 16 * 3);
                    __m512 _r04 = _mm512_load_ps(r0 + 16 * 4);
                    __m512 _r05 = _mm512_load_ps(r0 + 16 * 5);
                    __m512 _r06 = _mm512_load_ps(r0 + 16 * 6);
                    __m512 _r07 = _mm512_load_ps(r0 + 16 * 7);

                    __m512 _tmp0m = _mm512_fmadd_ps(_mm512_set1_ps(5.25f), _mm512_sub_ps(_r04, _r02), _mm512_sub_ps(_r00, _r06));
                    __m512 _tmp7m = _mm512_fmadd_ps(_mm512_set1_ps(5.25f), _mm512_sub_ps(_r03, _r05), _mm512_sub_ps(_r07, _r01));
                    _mm512_store_ps(tmp[0][m], _tmp0m);
                    _mm512_store_ps(tmp[7][m], _tmp7m);

                    __m512 _tmp12a = _mm512_fmadd_ps(_mm512_set1_ps(-4.25f), _r04, _mm512_add_ps(_r02, _r06));
                    __m512 _tmp12b = _mm512_fmadd_ps(_mm512_set1_ps(-4.25f), _r03, _mm512_add_ps(_r01, _r05));
                    __m512 _tmp1m = _mm512_add_ps(_tmp12a, _tmp12b);
                    __m512 _tmp2m = _mm512_sub_ps(_tmp12a, _tmp12b);
                    _mm512_store_ps(tmp[1][m], _tmp1m);
                    _mm512_store_ps(tmp[2][m], _tmp2m);

                    __m512 _tmp34a = _mm512_fmadd_ps(_mm512_set1_ps(-1.25f), _r04, _mm512_fmadd_ps(_mm512_set1_ps(0.25f), _r02, _r06));
                    __m512 _tmp34b = _mm512_fmadd_ps(_mm512_set1_ps(2.f), _r05, _mm512_fmadd_ps(_mm512_set1_ps(-2.5f), _r03, _mm512_mul_ps(_r01, _mm512_set1_ps(0.5f))));
                    __m512 _tmp3m = _mm512_add_ps(_tmp34a, _tmp34b);
                    __m512 _tmp4m = _mm512_sub_ps(_tmp34a, _tmp34b);
                    _mm512_store_ps(tmp[3][m], _tmp3m);
                    _mm512_store_ps(tmp[4][m], _tmp4m);

                    __m512 _tmp56a = _mm512_fmadd_ps(_mm512_set1_ps(4.f), _mm512_fmadd_ps(_mm512_set1_ps(-1.25f), _r04, _r02), _r06);
                    __m512 _tmp56b = _mm512_fmadd_ps(_mm512_set1_ps(0.5f), _r05, _mm512_fmadd_ps(_mm512_set1_ps(-2.5f), _r03, _mm512_mul_ps(_r01, _mm512_set1_ps(2.f))));
                    __m512 _tmp5m = _mm512_add_ps(_tmp56a, _tmp56b);
                    __m512 _tmp6m = _mm512_sub_ps(_tmp56a, _tmp56b);
                    _mm512_store_ps(tmp[5][m], _tmp5m);
                    _mm512_store_ps(tmp[6][m], _tmp6m);

                    r0 += w * 16;
                }

                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 16;
                float* r0_tm_1 = r0_tm_0 + tiles * 16;
                float* r0_tm_2 = r0_tm_0 + tiles * 16 * 2;
                float* r0_tm_3 = r0_tm_0 + tiles * 16 * 3;
                float* r0_tm_4 = r0_tm_0 + tiles * 16 * 4;
                float* r0_tm_5 = r0_tm_0 + tiles * 16 * 5;
                float* r0_tm_6 = r0_tm_0 + tiles * 16 * 6;
                float* r0_tm_7 = r0_tm_0 + tiles * 16 * 7;

                // Pass 2: same transform applied to the columns of tmp; each of
                // the 8 iterations writes one group of 8 transform positions
                // (advance by tiles * 16 * 8 = tiles * 128 floats).
                for (int m = 0; m < 8; m++)
                {
                    __m512 _tmp00 = _mm512_load_ps(tmp[m][0]);
                    __m512 _tmp01 = _mm512_load_ps(tmp[m][1]);
                    __m512 _tmp02 = _mm512_load_ps(tmp[m][2]);
                    __m512 _tmp03 = _mm512_load_ps(tmp[m][3]);
                    __m512 _tmp04 = _mm512_load_ps(tmp[m][4]);
                    __m512 _tmp05 = _mm512_load_ps(tmp[m][5]);
                    __m512 _tmp06 = _mm512_load_ps(tmp[m][6]);
                    __m512 _tmp07 = _mm512_load_ps(tmp[m][7]);

                    __m512 _r0tm0 = _mm512_fmadd_ps(_mm512_set1_ps(5.25f), _mm512_sub_ps(_tmp04, _tmp02), _mm512_sub_ps(_tmp00, _tmp06));
                    __m512 _r0tm7 = _mm512_fmadd_ps(_mm512_set1_ps(5.25f), _mm512_sub_ps(_tmp03, _tmp05), _mm512_sub_ps(_tmp07, _tmp01));

                    __m512 _tmp12a = _mm512_fmadd_ps(_mm512_set1_ps(-4.25f), _tmp04, _mm512_add_ps(_tmp02, _tmp06));
                    __m512 _tmp12b = _mm512_fmadd_ps(_mm512_set1_ps(-4.25f), _tmp03, _mm512_add_ps(_tmp01, _tmp05));
                    __m512 _r0tm1 = _mm512_add_ps(_tmp12a, _tmp12b);
                    __m512 _r0tm2 = _mm512_sub_ps(_tmp12a, _tmp12b);

                    __m512 _tmp34a = _mm512_fmadd_ps(_mm512_set1_ps(-1.25f), _tmp04, _mm512_fmadd_ps(_mm512_set1_ps(0.25f), _tmp02, _tmp06));
                    __m512 _tmp34b = _mm512_fmadd_ps(_mm512_set1_ps(2.f), _tmp05, _mm512_fmadd_ps(_mm512_set1_ps(-2.5f), _tmp03, _mm512_mul_ps(_tmp01, _mm512_set1_ps(0.5f))));
                    __m512 _r0tm3 = _mm512_add_ps(_tmp34a, _tmp34b);
                    __m512 _r0tm4 = _mm512_sub_ps(_tmp34a, _tmp34b);

                    __m512 _tmp56a = _mm512_fmadd_ps(_mm512_set1_ps(4.f), _mm512_fmadd_ps(_mm512_set1_ps(-1.25f), _tmp04, _tmp02), _tmp06);
                    __m512 _tmp56b = _mm512_fmadd_ps(_mm512_set1_ps(0.5f), _tmp05, _mm512_fmadd_ps(_mm512_set1_ps(-2.5f), _tmp03, _mm512_mul_ps(_tmp01, _mm512_set1_ps(2.f))));
                    __m512 _r0tm5 = _mm512_add_ps(_tmp56a, _tmp56b);
                    __m512 _r0tm6 = _mm512_sub_ps(_tmp56a, _tmp56b);

                    _mm512_store_ps(r0_tm_0, _r0tm0);
                    _mm512_store_ps(r0_tm_1, _r0tm1);
                    _mm512_store_ps(r0_tm_2, _r0tm2);
                    _mm512_store_ps(r0_tm_3, _r0tm3);
                    _mm512_store_ps(r0_tm_4, _r0tm4);
                    _mm512_store_ps(r0_tm_5, _r0tm5);
                    _mm512_store_ps(r0_tm_6, _r0tm6);
                    _mm512_store_ps(r0_tm_7, _r0tm7);

                    r0_tm_0 += tiles * 128;
                    r0_tm_1 += tiles * 128;
                    r0_tm_2 += tiles * 128;
                    r0_tm_3 += tiles * 128;
                    r0_tm_4 += tiles * 128;
                    r0_tm_5 += tiles * 128;
                    r0_tm_6 += tiles * 128;
                    r0_tm_7 += tiles * 128;
                }
            }
        }
    }
}

// Winograd output transform for F(6x6, 3x3): collapse each 8x8 tile in the
// Winograd domain back to a 6x6 spatial tile (A^T * tile * A), adding the
// per-channel bias. top_blob is written in pack16 layout (16 floats per pixel).
static void conv3x3s1_winograd63_transform_output_pack16_avx512(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 6;
    const int h_tiles = outh / 6;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[6][8] = {
    //     {1.0f, 1.0f,  1.0f,  1.0f,  1.0f, 32.0f, 32.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f,  2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f,  4.0f,  4.0f,  8.0f,  8.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f,  8.0f, -8.0f,  4.0f, -4.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f, 16.0f, 16.0f,  2.0f,  2.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 32.0f,-32.0f,  1.0f, -1.0f, 1.0f}
    // };

    // Factored form (sums/differences of pairs are shared):
    // 0 = r0 + (r1 + r2) + (r3 + r4)     + (r5 + r6) * 32
    // 1 =      (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
    // 2 =      (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
    // 3 =      (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
    // 4 =      (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
    // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        // Bias is broadcast to all pixels of the channel; zero when absent.
        __m512 _bias0 = biasptr ? _mm512_loadu_ps(biasptr + p * 16) : _mm512_setzero_ps();

#ifdef _MSC_VER
        __declspec(align(64))
#else
        __attribute__((aligned(64)))
#endif
        float tmp[6][8][16];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 16;
                const float* output0_tm_1 = output0_tm_0 + tiles * 16;
                const float* output0_tm_2 = output0_tm_0 + tiles * 16 * 2;
                const float* output0_tm_3 = output0_tm_0 + tiles * 16 * 3;
                const float* output0_tm_4 = output0_tm_0 + tiles * 16 * 4;
                const float* output0_tm_5 = output0_tm_0 + tiles * 16 * 5;
                const float* output0_tm_6 = output0_tm_0 + tiles * 16 * 6;
                const float* output0_tm_7 = output0_tm_0 + tiles * 16 * 7;

                float* output0 = out0.row(i * 6) + (j * 6) * 16;

                // Pass 1: 8 columns -> 6 rows of tmp (transposed store).
                for (int m = 0; m < 8; m++)
                {
                    __m512 _out0tm0 = _mm512_load_ps(output0_tm_0);
                    __m512 _out0tm1 = _mm512_load_ps(output0_tm_1);
                    __m512 _out0tm2 = _mm512_load_ps(output0_tm_2);
                    __m512 _out0tm3 = _mm512_load_ps(output0_tm_3);
                    __m512 _out0tm4 = _mm512_load_ps(output0_tm_4);
                    __m512 _out0tm5 = _mm512_load_ps(output0_tm_5);
                    __m512 _out0tm6 = _mm512_load_ps(output0_tm_6);
                    __m512 _out0tm7 = _mm512_load_ps(output0_tm_7);

                    __m512 _tmp024a = _mm512_add_ps(_out0tm1, _out0tm2);
                    __m512 _tmp135a = _mm512_sub_ps(_out0tm1, _out0tm2);

                    __m512 _tmp024b = _mm512_add_ps(_out0tm3, _out0tm4);
                    __m512 _tmp135b = _mm512_sub_ps(_out0tm3, _out0tm4);

                    __m512 _tmp024c = _mm512_add_ps(_out0tm5, _out0tm6);
                    __m512 _tmp135c = _mm512_sub_ps(_out0tm5, _out0tm6);

                    __m512 _tmp0m = _mm512_add_ps(_mm512_add_ps(_out0tm0, _tmp024a), _mm512_fmadd_ps(_mm512_set1_ps(32.f), _tmp024c, _tmp024b));
                    __m512 _tmp2m = _mm512_fmadd_ps(_mm512_set1_ps(8.f), _tmp024c, _mm512_fmadd_ps(_mm512_set1_ps(4.f), _tmp024b, _tmp024a));
                    __m512 _tmp4m = _mm512_fmadd_ps(_mm512_set1_ps(2.f), _tmp024c, _mm512_fmadd_ps(_mm512_set1_ps(16.f), _tmp024b, _tmp024a));
                    _mm512_store_ps(tmp[0][m], _tmp0m);
                    _mm512_store_ps(tmp[2][m], _tmp2m);
                    _mm512_store_ps(tmp[4][m], _tmp4m);

                    __m512 _tmp1m = _mm512_fmadd_ps(_mm512_set1_ps(16.f), _tmp135c, _mm512_fmadd_ps(_mm512_set1_ps(2.f), _tmp135b, _tmp135a));
                    __m512 _tmp3m = _mm512_fmadd_ps(_mm512_set1_ps(4.f), _tmp135c, _mm512_fmadd_ps(_mm512_set1_ps(8.f), _tmp135b, _tmp135a));
                    __m512 _tmp5m = _mm512_add_ps(_mm512_add_ps(_out0tm7, _tmp135a), _mm512_fmadd_ps(_mm512_set1_ps(32.f), _tmp135b, _tmp135c));
                    _mm512_store_ps(tmp[1][m], _tmp1m);
                    _mm512_store_ps(tmp[3][m], _tmp3m);
                    _mm512_store_ps(tmp[5][m], _tmp5m);

                    output0_tm_0 += tiles * 128;
                    output0_tm_1 += tiles * 128;
                    output0_tm_2 += tiles * 128;
                    output0_tm_3 += tiles * 128;
                    output0_tm_4 += tiles * 128;
                    output0_tm_5 += tiles * 128;
                    output0_tm_6 += tiles * 128;
                    output0_tm_7 += tiles * 128;
                }

                // Pass 2: reduce each tmp row to 6 output pixels, add bias.
                for (int m = 0; m < 6; m++)
                {
                    __m512 _tmp00 = _mm512_load_ps(tmp[m][0]);
                    __m512 _tmp01 = _mm512_load_ps(tmp[m][1]);
                    __m512 _tmp02 = _mm512_load_ps(tmp[m][2]);
                    __m512 _tmp03 = _mm512_load_ps(tmp[m][3]);
                    __m512 _tmp04 = _mm512_load_ps(tmp[m][4]);
                    __m512 _tmp05 = _mm512_load_ps(tmp[m][5]);
                    __m512 _tmp06 = _mm512_load_ps(tmp[m][6]);
                    __m512 _tmp07 = _mm512_load_ps(tmp[m][7]);

                    __m512 _tmp024a = _mm512_add_ps(_tmp01, _tmp02);
                    __m512 _tmp135a = _mm512_sub_ps(_tmp01, _tmp02);

                    __m512 _tmp024b = _mm512_add_ps(_tmp03, _tmp04);
                    __m512 _tmp135b = _mm512_sub_ps(_tmp03, _tmp04);

                    __m512 _tmp024c = _mm512_add_ps(_tmp05, _tmp06);
                    __m512 _tmp135c = _mm512_sub_ps(_tmp05, _tmp06);

                    __m512 _out00 = _mm512_add_ps(_bias0, _mm512_add_ps(_mm512_add_ps(_tmp00, _tmp024a), _mm512_fmadd_ps(_mm512_set1_ps(32.f), _tmp024c, _tmp024b)));
                    __m512 _out02 = _mm512_add_ps(_bias0, _mm512_fmadd_ps(_mm512_set1_ps(8.f), _tmp024c, _mm512_fmadd_ps(_mm512_set1_ps(4.f), _tmp024b, _tmp024a)));
                    __m512 _out04 = _mm512_add_ps(_bias0, _mm512_fmadd_ps(_mm512_set1_ps(2.f), _tmp024c, _mm512_fmadd_ps(_mm512_set1_ps(16.f), _tmp024b, _tmp024a)));
                    _mm512_store_ps(output0, _out00);
                    _mm512_store_ps(output0 + 32, _out02);
                    _mm512_store_ps(output0 + 64, _out04);

                    __m512 _out01 = _mm512_add_ps(_bias0, _mm512_fmadd_ps(_mm512_set1_ps(16.f), _tmp135c, _mm512_fmadd_ps(_mm512_set1_ps(2.f), _tmp135b, _tmp135a)));
                    __m512 _out03 = _mm512_add_ps(_bias0, _mm512_fmadd_ps(_mm512_set1_ps(4.f), _tmp135c, _mm512_fmadd_ps(_mm512_set1_ps(8.f), _tmp135b, _tmp135a)));
                    __m512 _out05 = _mm512_add_ps(_bias0, _mm512_add_ps(_mm512_add_ps(_tmp07, _tmp135a), _mm512_fmadd_ps(_mm512_set1_ps(32.f), _tmp135b, _tmp135c)));
                    _mm512_store_ps(output0 + 16, _out01);
                    _mm512_store_ps(output0 + 48, _out03);
                    _mm512_store_ps(output0 + 80, _out05);

                    output0 += outw * 16;
                }
            }
        }
    }
}

// Winograd input transform for F(4x4, 3x3): 6x6 input tiles (stride 4,
// 2-pixel overlap) into the Winograd domain, pack16 layout.
static void conv3x3s1_winograd43_transform_input_pack16_avx512(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    const int w_tiles = (w - 2) / 4;
    const int h_tiles = (h - 2) / 4;
    const int tiles = w_tiles * h_tiles;

    // NOTE: the dimension label was wrong in the original comment; this is the 6x6 B^T.
    // const float itm[6][6] = {
    //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
    //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
    //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
    //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f,  0.0f,-5.0f, 0.0f, 1.0f}
    // };

    // 0 =  4 * r00 - 5 * r02 + r04
    // 1 = -4 * (r01 + r02) + r04 + r03
    // 2 =  4 * (r01 - r02) + r04 - r03
    // 3 = -2 * (r01 - r03) + r04 - r02
    // 4 =  2 * (r01 - r03) + r04 - r02
    // 5 =  4 * r01 - 5 * r03 + r05

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

#ifdef _MSC_VER
        __declspec(align(64))
#else
        __attribute__((aligned(64)))
#endif
        float tmp[6][6][16];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* r0 = img0.row(i * 4) + (j * 4) * 16;

                // Pass 1: row transform, transposed store into tmp.
                for (int m = 0; m < 6; m++)
                {
                    __m512 _r00 = _mm512_load_ps(r0);
                    __m512 _r01 = _mm512_load_ps(r0 + 16);
                    __m512 _r02 = _mm512_load_ps(r0 + 16 * 2);
                    __m512 _r03 = _mm512_load_ps(r0 + 16 * 3);
                    __m512 _r04 = _mm512_load_ps(r0 + 16 * 4);
                    __m512 _r05 = _mm512_load_ps(r0 + 16 * 5);

                    __m512 _tmp0m = _mm512_fmadd_ps(_mm512_set1_ps(-5.f), _r02, _mm512_fmadd_ps(_mm512_set1_ps(4.f), _r00, _r04));
                    __m512 _tmp1m = _mm512_fmadd_ps(_mm512_set1_ps(-4.f), _mm512_add_ps(_r01, _r02), _mm512_add_ps(_r04, _r03));
                    __m512 _tmp2m = _mm512_fmadd_ps(_mm512_set1_ps(4.f), _mm512_sub_ps(_r01, _r02), _mm512_sub_ps(_r04, _r03));
                    __m512 _tmp3m = _mm512_fmadd_ps(_mm512_set1_ps(-2.f), _mm512_sub_ps(_r01, _r03), _mm512_sub_ps(_r04, _r02));
                    __m512 _tmp4m = _mm512_fmadd_ps(_mm512_set1_ps(2.f), _mm512_sub_ps(_r01, _r03), _mm512_sub_ps(_r04, _r02));
                    __m512 _tmp5m = _mm512_fmadd_ps(_mm512_set1_ps(-5.f), _r03, _mm512_fmadd_ps(_mm512_set1_ps(4.f), _r01, _r05));

                    _mm512_store_ps(tmp[0][m], _tmp0m);
                    _mm512_store_ps(tmp[1][m], _tmp1m);
                    _mm512_store_ps(tmp[2][m], _tmp2m);
                    _mm512_store_ps(tmp[3][m], _tmp3m);
                    _mm512_store_ps(tmp[4][m], _tmp4m);
                    _mm512_store_ps(tmp[5][m], _tmp5m);

                    r0 += w * 16;
                }

                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 16;
                float* r0_tm_1 = r0_tm_0 + tiles * 16;
                float* r0_tm_2 = r0_tm_0 + tiles * 16 * 2;
                float* r0_tm_3 = r0_tm_0 + tiles * 16 * 3;
                float* r0_tm_4 = r0_tm_0 + tiles * 16 * 4;
                float* r0_tm_5 = r0_tm_0 + tiles * 16 * 5;

                // Pass 2: column transform; advance by tiles * 16 * 6 = tiles * 96.
                for (int m = 0; m < 6; m++)
                {
                    __m512 _tmp00 = _mm512_load_ps(tmp[m][0]);
                    __m512 _tmp01 = _mm512_load_ps(tmp[m][1]);
                    __m512 _tmp02 = _mm512_load_ps(tmp[m][2]);
                    __m512 _tmp03 = _mm512_load_ps(tmp[m][3]);
                    __m512 _tmp04 = _mm512_load_ps(tmp[m][4]);
                    __m512 _tmp05 = _mm512_load_ps(tmp[m][5]);

                    __m512 _r0tm0 = _mm512_fmadd_ps(_mm512_set1_ps(-5.f), _tmp02, _mm512_fmadd_ps(_mm512_set1_ps(4.f), _tmp00, _tmp04));
                    __m512 _r0tm1 = _mm512_fmadd_ps(_mm512_set1_ps(-4.f), _mm512_add_ps(_tmp01, _tmp02), _mm512_add_ps(_tmp04, _tmp03));
                    __m512 _r0tm2 = _mm512_fmadd_ps(_mm512_set1_ps(4.f), _mm512_sub_ps(_tmp01, _tmp02), _mm512_sub_ps(_tmp04, _tmp03));
                    __m512 _r0tm3 = _mm512_fmadd_ps(_mm512_set1_ps(-2.f), _mm512_sub_ps(_tmp01, _tmp03), _mm512_sub_ps(_tmp04, _tmp02));
                    __m512 _r0tm4 = _mm512_fmadd_ps(_mm512_set1_ps(2.f), _mm512_sub_ps(_tmp01, _tmp03), _mm512_sub_ps(_tmp04, _tmp02));
                    __m512 _r0tm5 = _mm512_fmadd_ps(_mm512_set1_ps(-5.f), _tmp03, _mm512_fmadd_ps(_mm512_set1_ps(4.f), _tmp01, _tmp05));

                    _mm512_store_ps(r0_tm_0, _r0tm0);
                    _mm512_store_ps(r0_tm_1, _r0tm1);
                    _mm512_store_ps(r0_tm_2, _r0tm2);
                    _mm512_store_ps(r0_tm_3, _r0tm3);
                    _mm512_store_ps(r0_tm_4, _r0tm4);
                    _mm512_store_ps(r0_tm_5, _r0tm5);

                    r0_tm_0 += tiles * 96;
                    r0_tm_1 += tiles * 96;
                    r0_tm_2 += tiles * 96;
                    r0_tm_3 += tiles * 96;
                    r0_tm_4 += tiles * 96;
                    r0_tm_5 += tiles * 96;
                }
            }
        }
    }
}

// Winograd output transform for F(4x4, 3x3): 6x6 Winograd-domain tiles back to
// 4x4 spatial tiles with bias, pack16 layout.
static void conv3x3s1_winograd43_transform_output_pack16_avx512(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 4;
    const int h_tiles = outh / 4;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[4][6] = {
    //     {1.0f, 1.0f,  1.0f, 1.0f,  1.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f, 4.0f,  4.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
    // };

    // 0 = r00 + (r01 + r02) + (r03 + r04)
    // 1 =       (r01 - r02) + (r03 - r04) * 2
    // 2 =       (r01 + r02) + (r03 + r04) * 4
    // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        __m512 _bias0 = biasptr ? _mm512_loadu_ps(biasptr + p * 16) : _mm512_setzero_ps();

#ifdef _MSC_VER
        __declspec(align(64))
#else
        __attribute__((aligned(64)))
#endif
        float tmp[4][6][16];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 16;
                const float* output0_tm_1 = output0_tm_0 + tiles * 16;
                const float* output0_tm_2 = output0_tm_0 + tiles * 16 * 2;
                const float* output0_tm_3 = output0_tm_0 + tiles * 16 * 3;
                const float* output0_tm_4 = output0_tm_0 + tiles * 16 * 4;
                const float* output0_tm_5 = output0_tm_0 + tiles * 16 * 5;

                float* output0 = out0.row(i * 4) + (j * 4) * 16;

                // Pass 1: 6 columns -> 4 rows of tmp (transposed store).
                for (int m = 0; m < 6; m++)
                {
                    __m512 _out0tm0 = _mm512_load_ps(output0_tm_0);
                    __m512 _out0tm1 = _mm512_load_ps(output0_tm_1);
                    __m512 _out0tm2 = _mm512_load_ps(output0_tm_2);
                    __m512 _out0tm3 = _mm512_load_ps(output0_tm_3);
                    __m512 _out0tm4 = _mm512_load_ps(output0_tm_4);
                    __m512 _out0tm5 = _mm512_load_ps(output0_tm_5);

                    __m512 _tmp02a = _mm512_add_ps(_out0tm1, _out0tm2);
                    __m512 _tmp13a = _mm512_sub_ps(_out0tm1, _out0tm2);

                    __m512 _tmp02b = _mm512_add_ps(_out0tm3, _out0tm4);
                    __m512 _tmp13b = _mm512_sub_ps(_out0tm3, _out0tm4);

                    __m512 _tmp0m = _mm512_add_ps(_mm512_add_ps(_out0tm0, _tmp02a), _tmp02b);
                    __m512 _tmp1m = _mm512_fmadd_ps(_mm512_set1_ps(2.f), _tmp13b, _tmp13a);
                    __m512 _tmp2m = _mm512_fmadd_ps(_mm512_set1_ps(4.f), _tmp02b, _tmp02a);
                    __m512 _tmp3m = _mm512_fmadd_ps(_mm512_set1_ps(8.f), _tmp13b, _mm512_add_ps(_out0tm5, _tmp13a));

                    _mm512_store_ps(tmp[0][m], _tmp0m);
                    _mm512_store_ps(tmp[1][m], _tmp1m);
                    _mm512_store_ps(tmp[2][m], _tmp2m);
                    _mm512_store_ps(tmp[3][m], _tmp3m);

                    output0_tm_0 += tiles * 96;
                    output0_tm_1 += tiles * 96;
                    output0_tm_2 += tiles * 96;
                    output0_tm_3 += tiles * 96;
                    output0_tm_4 += tiles * 96;
                    output0_tm_5 += tiles * 96;
                }

                // Pass 2: reduce each tmp row to 4 output pixels, add bias.
                for (int m = 0; m < 4; m++)
                {
                    __m512 _tmp00 = _mm512_load_ps(tmp[m][0]);
                    __m512 _tmp01 = _mm512_load_ps(tmp[m][1]);
                    __m512 _tmp02 = _mm512_load_ps(tmp[m][2]);
                    __m512 _tmp03 = _mm512_load_ps(tmp[m][3]);
                    __m512 _tmp04 = _mm512_load_ps(tmp[m][4]);
                    __m512 _tmp05 = _mm512_load_ps(tmp[m][5]);

                    __m512 _tmp02a = _mm512_add_ps(_tmp01, _tmp02);
                    __m512 _tmp13a = _mm512_sub_ps(_tmp01, _tmp02);

                    __m512 _tmp02b = _mm512_add_ps(_tmp03, _tmp04);
                    __m512 _tmp13b = _mm512_sub_ps(_tmp03, _tmp04);

                    __m512 _out00 = _mm512_add_ps(_bias0, _mm512_add_ps(_mm512_add_ps(_tmp00, _tmp02a), _tmp02b));
                    __m512 _out01 = _mm512_add_ps(_bias0, _mm512_fmadd_ps(_mm512_set1_ps(2.f), _tmp13b, _tmp13a));
                    __m512 _out02 = _mm512_add_ps(_bias0, _mm512_fmadd_ps(_mm512_set1_ps(4.f), _tmp02b, _tmp02a));
                    __m512 _out03 = _mm512_add_ps(_bias0, _mm512_fmadd_ps(_mm512_set1_ps(8.f), _tmp13b, _mm512_add_ps(_tmp05, _tmp13a)));

                    _mm512_store_ps(output0, _out00);
                    _mm512_store_ps(output0 + 16, _out01);
                    _mm512_store_ps(output0 + 16 * 2, _out02);
                    _mm512_store_ps(output0 + 16 * 3, _out03);

                    output0 += outw * 16;
                }
            }
        }
    }
}

// Winograd input transform for F(2x2, 3x3): 4x4 input tiles (stride 2,
// 2-pixel overlap) into the Winograd domain, pack16 layout.
static void conv3x3s1_winograd23_transform_input_pack16_avx512(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    const int w_tiles = (w - 2) / 2;
    const int h_tiles = (h - 2) / 2;
    const int tiles = w_tiles * h_tiles;

    // const float itm[4][4] = {
    //     {1.0f,  0.0f, -1.0f,  0.0f},
    //     {0.0f,  1.0f,  1.00f, 0.0f},
    //     {0.0f, -1.0f,  1.00f, 0.0f},
    //     {0.0f, -1.0f,  0.00f, 1.0f}
    // };

    // 0 = r00 - r02
    // 1 = r01 + r02
    // 2 = r02 - r01
    // 3 = r03 - r01

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

#ifdef _MSC_VER
        __declspec(align(64))
#else
        __attribute__((aligned(64)))
#endif
        float tmp[4][4][16];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* r0 = img0.row(i * 2) + (j * 2) * 16;

                // Pass 1: row transform, transposed store into tmp.
                for (int m = 0; m < 4; m++)
                {
                    __m512 _r00 = _mm512_load_ps(r0);
                    __m512 _r01 = _mm512_load_ps(r0 + 16);
                    __m512 _r02 = _mm512_load_ps(r0 + 16 * 2);
                    __m512 _r03 = _mm512_load_ps(r0 + 16 * 3);

                    __m512 _tmp0m = _mm512_sub_ps(_r00, _r02);
                    __m512 _tmp1m = _mm512_add_ps(_r01, _r02);
                    __m512 _tmp2m = _mm512_sub_ps(_r02, _r01);
                    __m512 _tmp3m = _mm512_sub_ps(_r03, _r01);

                    _mm512_store_ps(tmp[0][m], _tmp0m);
                    _mm512_store_ps(tmp[1][m], _tmp1m);
                    _mm512_store_ps(tmp[2][m], _tmp2m);
                    _mm512_store_ps(tmp[3][m], _tmp3m);

                    r0 += w * 16;
                }

                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 16;
                float* r0_tm_1 = r0_tm_0 + tiles * 16;
                float* r0_tm_2 = r0_tm_0 + tiles * 16 * 2;
                float* r0_tm_3 = r0_tm_0 + tiles * 16 * 3;

                // Pass 2: column transform; advance by tiles * 16 * 4.
                for (int m = 0; m < 4; m++)
                {
                    __m512 _tmp00 = _mm512_load_ps(tmp[m][0]);
                    __m512 _tmp01 = _mm512_load_ps(tmp[m][1]);
                    __m512 _tmp02 = _mm512_load_ps(tmp[m][2]);
                    __m512 _tmp03 = _mm512_load_ps(tmp[m][3]);

                    __m512 _r0tm0 = _mm512_sub_ps(_tmp00, _tmp02);
                    __m512 _r0tm1 = _mm512_add_ps(_tmp01, _tmp02);
                    __m512 _r0tm2 = _mm512_sub_ps(_tmp02, _tmp01);
                    __m512 _r0tm3 = _mm512_sub_ps(_tmp03, _tmp01);

                    _mm512_store_ps(r0_tm_0, _r0tm0);
                    _mm512_store_ps(r0_tm_1, _r0tm1);
                    _mm512_store_ps(r0_tm_2, _r0tm2);
                    _mm512_store_ps(r0_tm_3, _r0tm3);

                    r0_tm_0 += tiles * 16 * 4;
                    r0_tm_1 += tiles * 16 * 4;
                    r0_tm_2 += tiles * 16 * 4;
                    r0_tm_3 += tiles * 16 * 4;
                }
            }
        }
    }
}

// Winograd output transform for F(2x2, 3x3): 4x4 Winograd-domain tiles back to
// 2x2 spatial tiles with bias, pack16 layout.
static void conv3x3s1_winograd23_transform_output_pack16_avx512(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 2;
    const int h_tiles = outh / 2;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[2][4] = {
    //     {1.0f, 1.0f,  1.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 1.0f}
    // };

    // 0 = r00 + r01 + r02
    // 1 = r01 - r02 + r03

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        __m512 _bias0 = biasptr ? _mm512_loadu_ps(biasptr + p * 16) : _mm512_setzero_ps();

#ifdef _MSC_VER
        __declspec(align(64))
#else
        __attribute__((aligned(64)))
#endif
        float tmp[2][4][16];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 16;
                const float* output0_tm_1 = output0_tm_0 + tiles * 16;
                const float* output0_tm_2 = output0_tm_0 + tiles * 16 * 2;
                const float* output0_tm_3 = output0_tm_0 + tiles * 16 * 3;

                float* output0 = out0.row(i * 2) + (j * 2) * 16;

                // Pass 1: 4 columns -> 2 rows of tmp (transposed store).
                for (int m = 0; m < 4; m++)
                {
                    __m512 _out0tm0 = _mm512_load_ps(output0_tm_0);
                    __m512 _out0tm1 = _mm512_load_ps(output0_tm_1);
                    __m512 _out0tm2 = _mm512_load_ps(output0_tm_2);
                    __m512 _out0tm3 = _mm512_load_ps(output0_tm_3);

                    __m512 _tmp0m = _mm512_add_ps(_mm512_add_ps(_out0tm0, _out0tm1), _out0tm2);
                    __m512 _tmp1m = _mm512_add_ps(_mm512_sub_ps(_out0tm1, _out0tm2), _out0tm3);

                    _mm512_store_ps(tmp[0][m], _tmp0m);
                    _mm512_store_ps(tmp[1][m], _tmp1m);

                    output0_tm_0 += tiles * 16 * 4;
                    output0_tm_1 += tiles * 16 * 4;
                    output0_tm_2 += tiles * 16 * 4;
                    output0_tm_3 += tiles * 16 * 4;
                }

                // Pass 2: reduce each tmp row to 2 output pixels, add bias.
                for (int m = 0; m < 2; m++)
                {
                    __m512 _tmp00 = _mm512_load_ps(tmp[m][0]);
                    __m512 _tmp01 = _mm512_load_ps(tmp[m][1]);
                    __m512 _tmp02 = _mm512_load_ps(tmp[m][2]);
                    __m512 _tmp03 = _mm512_load_ps(tmp[m][3]);

                    __m512 _out00 = _mm512_add_ps(_bias0, _mm512_add_ps(_mm512_add_ps(_tmp00, _tmp01), _tmp02));
                    __m512 _out01 = _mm512_add_ps(_bias0, _mm512_add_ps(_mm512_sub_ps(_tmp01, _tmp02), _tmp03));

                    _mm512_store_ps(output0, _out00);
                    _mm512_store_ps(output0 + 16, _out01);

                    output0 += outw * 16;
                }
            }
        }
    }
}
comm.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /** * Copyright (c) 2015 by Contributors */ #ifndef MXNET_KVSTORE_COMM_H_ #define MXNET_KVSTORE_COMM_H_ #include <dmlc/omp.h> #include <string> #include <algorithm> #include <utility> #include <limits> #include <vector> #include <tuple> #include <thread> #include "mxnet/ndarray.h" #include "gradient_compression.h" #include "../ndarray/ndarray_function.h" #include "../operator/tensor/sparse_retain-inl.h" #include "./kvstore_utils.h" namespace mxnet { namespace kvstore { /** * \brief multiple device commmunication */ class Comm { public: Comm() { pinned_ctx_ = Context::CPUPinned(0); } virtual ~Comm() { } /** * \brief init key with the data shape and storage shape */ virtual void Init(int key, const NDArrayStorageType stype, const TShape& shape, int dtype = mshadow::kFloat32) = 0; /** * \brief returns src[0] + .. 
+ src[src.size()-1] */ virtual const NDArray& Reduce( int key, const std::vector<NDArray>& src, int priority) = 0; /** * \brief copy from src to dst[i] for every i */ virtual void Broadcast( int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) = 0; /** * \brief broadcast src to dst[i] with target row_ids for every i * \param dst a list of destination row_sparse NDArray and its target row_ids to broadcast, where the row_ids are expected to be unique and sorted * \param use_copy if set to true, directly copy src to dst[i] without looking up the provided row_ids */ virtual void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const bool use_copy, const int priority) = 0; /** * \brief return a pinned contex */ Context pinned_ctx() const { return pinned_ctx_; } /** * \brief Sets gradient compression parameters to be able to * perform reduce with compressed gradients */ void SetGradientCompression(std::shared_ptr<GradientCompression> gc) { gc_ = gc; } protected: Context pinned_ctx_; std::shared_ptr<GradientCompression> gc_; }; /** * \brief an implemention of Comm that first copy data to CPU memeory, and then * reduce there */ class CommCPU : public Comm { public: CommCPU() { nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4); bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000); // TODO(junwu) delete the following data member, now for benchmark only is_serial_push_ = dmlc::GetEnv("MXNET_KVSTORE_SERIAL_PUSH", 0); } virtual ~CommCPU() { } void Init(int key, const NDArrayStorageType stype, const TShape& shape, int type = mshadow::kFloat32) override { if (stype == kDefaultStorage) { merge_buf_[key].merged = NDArray(shape, pinned_ctx_, false, type); } else { merge_buf_[key].merged = NDArray(stype, shape, pinned_ctx_, true, type); } } const NDArray& Reduce(int key, const std::vector<NDArray>& src, int priority) override { auto& buf = merge_buf_[key]; // 
avoid extra copy for single device, but it may bring problems for // abnormal usage of kvstore if (src.size() == 1) { if (src[0].storage_type() == kDefaultStorage) { return src[0]; } else { // if sparse and only one GPU, always update weight on CPU CopyFromTo(src[0], &buf.merged, priority); return buf.merged; } } if (buf.merged.storage_type() == kDefaultStorage) { std::vector<Engine::VarHandle> const_vars(src.size() - 1); std::vector<NDArray> reduce(src.size()); CopyFromTo(src[0], &buf.merged, priority); reduce[0] = buf.merged; if (buf.copy_buf.empty()) { buf.copy_buf.resize(src.size()-1); for (size_t j = 0; j < src.size() - 1; ++j) { // allocate NDArray based on storage type buf.copy_buf[j] = NDArray( src[0].shape(), pinned_ctx_, false, src[0].dtype()); } } for (size_t i = 1; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i-1]), priority); reduce[i] = buf.copy_buf[i-1]; const_vars[i-1] = reduce[i].var(); } Engine::Get()->PushAsync( [reduce, this](RunContext rctx, Engine::CallbackOnComplete on_complete) { ReduceSumCPU(reduce); on_complete(); }, Context::CPU(), const_vars, {reduce[0].var()}, FnProperty::kCPUPrioritized, priority, PROFILER_MESSAGE("KVStoreReduce")); } else { // buf.merged is a sparse ndarray. std::vector<Engine::VarHandle> const_vars(src.size()); std::vector<NDArray> reduce(src.size()); if (buf.copy_buf.empty()) { buf.copy_buf.resize(src.size()); for (size_t j = 0; j < src.size(); ++j) { buf.copy_buf[j] = NDArray( src[0].storage_type(), src[0].shape(), pinned_ctx_, true, src[0].dtype()); } } for (size_t i = 0; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; const_vars[i] = reduce[i].var(); } NDArray result = buf.merged; Resource rsc = ResourceManager::Get()->Request(result.ctx(), ResourceRequest(ResourceRequest::kTempSpace)); Engine::Get()->PushAsync( [reduce, result, rsc, this](RunContext rctx, Engine::CallbackOnComplete on_complete) { NDArray out = result; is_serial_push_? 
ReduceSumCPUExSerial(reduce, &out) : mxnet::ndarray::ElementwiseSum(rctx.get_stream<cpu>(), rsc, reduce, &out); on_complete(); }, Context::CPU(), const_vars, {result.var(), rsc.var}, FnProperty::kCPUPrioritized, priority, PROFILER_MESSAGE("KVStoreReduce")); } return buf.merged; } void Broadcast(int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) override { int mask = src.ctx().dev_mask(); if (mask == Context::kCPU) { for (auto d : dst) CopyFromTo(src, d, priority); } else { // first copy data to cpu, then broadcast auto& buf = merge_buf_[key]; CopyFromTo(src, &buf.merged, priority); for (auto d : dst) CopyFromTo(buf.merged, d, priority); } } void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const bool use_copy, const int priority) override { using namespace mshadow; CHECK_EQ(src.storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row-sparse src NDArray"; CHECK_EQ(src.ctx().dev_mask(), Context::kCPU) << "BroadcastRowSparse with src on gpu context not supported"; for (size_t i = 0; i < dst.size(); ++i) { NDArray* out = dst[i].first; NDArray row_id = dst[i].second; if (use_copy) { CopyFromTo(src, out, priority); } else { CHECK_EQ(out->storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row_sparse dst NDArray"; CHECK_EQ(row_id.ctx().dev_mask(), Context::kCPU) << "BroadcastRowSparse with row_indices on gpu context not supported"; // retain according to unique indices const bool use_sparse_retain = (src.shape()[0] != src.storage_shape()[0]) || (row_id.dtype() != out->aux_type(rowsparse::kIdx)) || (out->ctx().dev_mask() != Context::kGPU); if (use_sparse_retain) { // use sparse_retain op const bool is_to_gpu = out->ctx().dev_mask() == Context::kGPU; NDArray out_cpu = is_to_gpu? 
NDArray(kRowSparseStorage, src.shape(), src.ctx(), true, src.dtype(), src.aux_types()) : *out; Engine::Get()->PushAsync( [=](RunContext rctx, Engine::CallbackOnComplete on_complete) { const TBlob& indices = row_id.data(); NDArray temp = out_cpu; // get rid of const qualifier op::SparseRetainOpForwardRspImpl<cpu>(rctx.get_stream<cpu>(), src, indices, kWriteTo, &temp); on_complete(); }, Context::CPU(), {src.var(), row_id.var()}, {out_cpu.var()}, FnProperty::kNormal, priority, PROFILER_MESSAGE("KVStoreSparseRetain")); if (is_to_gpu) { CopyFromTo(out_cpu, out, priority); } } else { // direct copy rows Engine::Get()->PushAsync( [=](RunContext rctx, Engine::CallbackOnComplete on_complete) { CopyRetainedRowsToGPU(rctx.get_stream<cpu>(), rctx.get_stream<gpu>(), src, row_id, out); // wait for GPU operations to complete rctx.get_stream<gpu>()->Wait(); on_complete(); }, out->ctx(), {src.var(), row_id.var()}, {out->var()}, FnProperty::kCopyToGPU, priority, PROFILER_MESSAGE("KVStoreCopyRetainedRowsToGPU")); } } } } private: /*! * \brief When src is a rsp with full rows, * simply copy retained rows directly from cpu to gpu * without invoking sparse_retain op. 
*/ void CopyRetainedRowsToGPU(mshadow::Stream<cpu>* cpu_stream, mshadow::Stream<gpu>* gpu_stream, const NDArray& src, const NDArray& indices, NDArray* dst) { #if MXNET_USE_CUDA == 1 CHECK_EQ(src.storage_type(), kRowSparseStorage) << "CopyRetainedRowsToGPU expects row-sparse src NDArray"; CHECK_EQ(src.ctx().dev_mask(), Context::kCPU) << "CopyRetainedRowsToGPU with src on gpu context not supported"; CHECK_EQ(src.storage_shape()[0], src.shape()[0]) << "CopyRetainedRowsToGPU only supports src rsp with full rows"; CHECK_EQ(indices.storage_type(), kDefaultStorage); CHECK_EQ(indices.ctx().dev_mask(), Context::kCPU); CHECK_EQ(dst->storage_type(), kRowSparseStorage); CHECK_EQ(dst->ctx().dev_mask(), Context::kGPU); CHECK_EQ(indices.dtype(), dst->aux_type(rowsparse::kIdx)) << "CopyRetainedRowsToGPU only supports same data type for idx array and dst aux_data(0)"; if (!src.storage_initialized() || indices.data().Size() == 0U) { op::FillZerosRspImpl(gpu_stream, *dst); return; } using namespace mshadow; const TBlob& src_data = src.data(); const TBlob& idx_data = indices.data(); const size_t row_length = src.shape().ProdShape(1, src.shape().ndim()); const size_t num_rows_retained = idx_data.Size(); dst->CheckAndAlloc({Shape1(num_rows_retained)}); TBlob dst_data = dst->data(); TBlob dst_idx_data = dst->aux_data(rowsparse::kIdx); MSHADOW_TYPE_SWITCH(src.dtype(), DType, { MSHADOW_IDX_TYPE_SWITCH(indices.dtype(), IType, { // copy idx array Tensor<gpu, 1, IType> dst_idx_tensor = dst_idx_data.FlatTo1D<gpu, IType>(gpu_stream); const Tensor<cpu, 1, IType> idx_tensor = idx_data.FlatTo1D<cpu, IType>(cpu_stream); Copy(dst_idx_tensor, idx_tensor, gpu_stream); // copy src data const Tensor<cpu, 2, DType> src_data_tensor = src_data.get_with_shape<cpu, 2, DType>( Shape2(src_data.shape_[0], row_length), cpu_stream); Tensor<gpu, 2, DType> dst_data_tensor = dst_data.get_with_shape<gpu, 2, DType>( Shape2(dst_data.shape_[0], row_length), gpu_stream); for (size_t i = 0; i < num_rows_retained; ++i) { 
Copy(dst_data_tensor[i], src_data_tensor[idx_tensor[i]], gpu_stream); } }) }) #else LOG(FATAL) << "GPU not enabled"; #endif } // reduce sum into val[0] inline void ReduceSumCPU(const std::vector<NDArray> &in_data) { MSHADOW_TYPE_SWITCH(in_data[0].dtype(), DType, { std::vector<DType*> dptr(in_data.size()); for (size_t i = 0; i < in_data.size(); ++i) { TBlob data = in_data[i].data(); CHECK(data.CheckContiguous()); dptr[i] = data.FlatTo2D<cpu, DType>().dptr_; } size_t total = in_data[0].shape().Size(); ReduceSumCPUImpl(dptr, total); }); } // serial implementation of reduce sum for row sparse NDArray. inline void ReduceSumCPUExSerial(const std::vector<NDArray> &in, NDArray *out) { using namespace rowsparse; using namespace mshadow; auto stype = out->storage_type(); CHECK_EQ(stype, kRowSparseStorage) << "Unexpected storage type " << stype; size_t total_num_rows = 0; size_t num_in = in.size(); // skip the ones with empty indices and values std::vector<bool> skip(num_in, false); // the values tensor of the inputs MSHADOW_TYPE_SWITCH(out->dtype(), DType, { MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, { std::vector<Tensor<cpu, 2, DType>> in_vals(num_in); std::vector<Tensor<cpu, 1, IType>> in_indices(num_in); // offset to the values tensor of all inputs std::vector<size_t> offsets(num_in, 0); std::vector<size_t> num_rows(num_in, 0); for (size_t i = 0; i < num_in; i++) { if (!in[i].storage_initialized()) { skip[i] = true; continue; } auto size = in[i].aux_shape(kIdx).Size(); num_rows[i] = size; total_num_rows += size; in_vals[i] = in[i].data().FlatTo2D<cpu, DType>(); in_indices[i] = in[i].aux_data(kIdx).FlatTo1D<cpu, IType>(); } std::vector<IType> indices; indices.reserve(total_num_rows); // gather indices from all inputs for (size_t i = 0; i < num_in; i++) { for (size_t j = 0; j < num_rows[i]; j++) { indices.emplace_back(in_indices[i][j]); } } CHECK_EQ(indices.size(), total_num_rows); // dedup indices std::sort(indices.begin(), indices.end()); 
indices.resize(std::unique(indices.begin(), indices.end()) - indices.begin()); // the one left are unique non-zero rows size_t nnr = indices.size(); // allocate memory for output out->CheckAndAlloc({Shape1(nnr)}); auto idx_data = out->aux_data(kIdx).FlatTo1D<cpu, IType>(); auto val_data = out->data().FlatTo2D<cpu, DType>(); for (size_t i = 0; i < nnr; i++) { // copy indices back idx_data[i] = indices[i]; bool zeros = true; for (size_t j = 0; j < num_in; j++) { if (skip[j]) continue; size_t offset = offsets[j]; if (offset < num_rows[j]) { if (indices[i] == in_indices[j][offset]) { if (zeros) { Copy(val_data[i], in_vals[j][offset], nullptr); zeros = false; } else { val_data[i] += in_vals[j][offset]; } offsets[j] += 1; } } } } }); }); } template<typename DType> inline static void ReduceSumCPU( const std::vector<DType*> &dptr, size_t offset, index_t size) { using namespace mshadow; // NOLINT(*) Tensor<cpu, 1, DType> in_0(dptr[0] + offset, Shape1(size)); for (size_t i = 1; i < dptr.size(); i+=4) { switch (dptr.size() - i) { case 1: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); in_0 += in_1; break; } case 2: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); in_0 += in_1 + in_2; break; } case 3: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size)); in_0 += in_1 + in_2 + in_3; break; } default: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_4(dptr[i+3] + offset, Shape1(size)); in_0 += in_1 + in_2 + in_3 + in_4; break; } } } } template<typename DType> inline void ReduceSumCPUImpl(std::vector<DType*> dptr, size_t total) { const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 
10)); long ntask = (total + step - 1) / step; // NOLINT(*) if (total < bigarray_bound_ || nthread_reduction_ <= 1) { ReduceSumCPU(dptr, 0, total); } else { #pragma omp parallel for schedule(static) num_threads(nthread_reduction_) for (long j = 0; j < ntask; ++j) { // NOLINT(*) size_t k = static_cast<size_t>(j); size_t begin = std::min(k * step, total); size_t end = std::min((k + 1) * step, total); if (j == ntask - 1) CHECK_EQ(end, total); ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin)); } } } /// \brief temporal space for pushing and pulling struct BufferEntry { /// \brief the merged value NDArray merged; /// \brief the cpu buffer for gpu data std::vector<NDArray> copy_buf; }; std::unordered_map<int, BufferEntry> merge_buf_; size_t bigarray_bound_; int nthread_reduction_; bool is_serial_push_; }; /** * \brief an implementation of Comm that performs reduction on device * directly. * * It is faster if the total device-to-device bandwidths is larger than * device-to-cpu, which is often true for 4 or 8 GPUs. But it uses more device * memory. 
 */
class CommDevice : public Comm {
 public:
  CommDevice() {
    inited_ = false;
  }

  virtual ~CommDevice() { }

  /// \brief record the (key, shape, dtype, stype) attributes; actual buffer
  /// allocation is deferred to InitMergeBuffer on the first Reduce.
  void Init(int key, const NDArrayStorageType stype, const TShape& shape,
            int dtype = mshadow::kFloat32) override {
    sorted_key_attrs_.emplace_back(key, shape, dtype, stype);
  }

  /// \brief lazily allocate the merge buffers on the devices appearing in src
  /// and (optionally) enable CUDA peer-to-peer access between those devices.
  void InitBuffersAndComm(const std::vector<NDArray>& src) {
    if (!inited_) {
      std::vector<Context> devs;
      for (const auto& a : src) {
        devs.push_back(a.ctx());
      }
      InitMergeBuffer(devs);
      if (dmlc::GetEnv("MXNET_ENABLE_GPU_P2P", 1)) {
        EnableP2P(devs);
      }
    }
  }

  /// \brief sum src (one NDArray per device) into the per-key merge buffer,
  /// performing the reduction on-device. Returns a reference to the merged
  /// array (or src[0] itself for the single-device case).
  const NDArray& Reduce(int key, const std::vector<NDArray>& src,
                        int priority) override {
    // when this reduce is called from kvstore_dist, gc is not set
    // we don't do compression twice in dist_sync_device
    if ((gc_ != nullptr) && (gc_->get_type() != CompressionType::kNone)) {
      return ReduceCompressed(key, src, priority);
    }

    // avoid extra copy for single device, but it may bring problems for
    // abnormal usage of kvstore
    if (src.size() == 1) {
      return src[0];
    }

    InitBuffersAndComm(src);
    auto& buf = merge_buf_[key];
    std::vector<NDArray> reduce(src.size());

    const NDArrayStorageType stype = buf.merged.storage_type();
    if (stype == kDefaultStorage) {
      // dense case: src[0] is copied straight into the merge buffer, the
      // remaining inputs go through per-device staging buffers first.
      CopyFromTo(src[0], &(buf.merged), priority);
      reduce[0] = buf.merged;

      if (buf.copy_buf.empty()) {
        // TODO(mli) this results in large device memory usage for huge ndarray,
        // such as the largest fullc in VGG. consider to do segment reduce with
        // NDArray.Slice or gpu direct memory access. for the latter, we need to
        // remove some ctx check, and also it reduces 20% perf
        buf.copy_buf.resize(src.size()-1);
        for (size_t i = 0; i < src.size()-1; ++i) {
          buf.copy_buf[i] = NDArray(
            buf.merged.shape(), buf.merged.ctx(), false, buf.merged.dtype());
        }
      }
      for (size_t i = 0; i < src.size()-1; ++i) {
        CopyFromTo(src[i+1], &(buf.copy_buf[i]), priority);
        reduce[i+1] = buf.copy_buf[i];
      }
    } else {
      // sparse case: every input (including src[0]) is staged, since the
      // sparse merged buffer cannot be used as an in-place accumulator.
      if (buf.copy_buf.empty()) {
        buf.copy_buf.resize(src.size());
        for (size_t j = 0; j < src.size(); ++j) {
          buf.copy_buf[j] = NDArray(
            buf.merged.storage_type(), buf.merged.shape(), buf.merged.ctx(),
            true, buf.merged.dtype());
        }
      }
      for (size_t i = 0; i < src.size(); ++i) {
        CopyFromTo(src[i], &(buf.copy_buf[i]), priority);
        reduce[i] = buf.copy_buf[i];
      }
    }

    ElementwiseSum(reduce, &buf.merged, priority);
    return buf.merged;
  }

  /// \brief Reduce with gradient compression: each input is quantized on its
  /// own device, the small compressed buffer is copied to the merge device,
  /// dequantized there, and finally summed.
  const NDArray& ReduceCompressed(int key, const std::vector<NDArray>& src,
                                  int priority) {
    InitBuffersAndComm(src);
    auto& buf = merge_buf_[key];
    std::vector<NDArray> reduce(src.size());
    if (buf.copy_buf.empty()) {
      // one buf for each context
      buf.copy_buf.resize(src.size());
      buf.compressed_recv_buf.resize(src.size());
      buf.compressed_send_buf.resize(src.size());
      buf.residual.resize(src.size());

      for (size_t i = 0; i < src.size(); ++i) {
        buf.copy_buf[i] = NDArray(buf.merged.shape(), buf.merged.ctx(),
                                  false, buf.merged.dtype());
        buf.residual[i] = NDArray(buf.merged.shape(), src[i].ctx(),
                                  false, buf.merged.dtype());
        buf.residual[i] = 0;
        int64_t small_size = gc_->GetCompressedSize(buf.merged.shape().Size());
        buf.compressed_recv_buf[i] = NDArray(TShape{small_size}, buf.merged.ctx(),
                                             false, buf.merged.dtype());
        buf.compressed_send_buf[i] = NDArray(TShape{small_size}, src[i].ctx(),
                                             false, buf.merged.dtype());
      }
    }

    for (size_t i = 0; i < src.size(); ++i) {
      // compress before copy
      // this is done even if the data is on same context as copy_buf because
      // we don't want the training to be biased towards data on this GPU
      gc_->Quantize(src[i], &(buf.compressed_send_buf[i]),
                    &(buf.residual[i]), priority);
      if (buf.compressed_send_buf[i].ctx() != buf.compressed_recv_buf[i].ctx()) {
        CopyFromTo(buf.compressed_send_buf[i], &(buf.compressed_recv_buf[i]), priority);
      } else {
        // avoid memory copy when they are on same context
        buf.compressed_recv_buf[i] = buf.compressed_send_buf[i];
      }
      gc_->Dequantize(buf.compressed_recv_buf[i], &(buf.copy_buf[i]), priority);
      reduce[i] = buf.copy_buf[i];
    }
    ElementwiseSum(reduce, &buf.merged);
    return buf.merged;
  }

  /// \brief copy src to every destination device; before initialization the
  /// merge buffer does not exist yet, so a key-chosen destination is used as
  /// the relay instead.
  void Broadcast(int key, const NDArray& src,
                 const std::vector<NDArray*> dst, int priority) override {
    if (!inited_) {
      // copy to a random device first
      int dev_id = key % dst.size();
      CopyFromTo(src, dst[dev_id], priority);
      for (size_t i = 0; i < dst.size(); ++i) {
        if (i != static_cast<size_t>(dev_id)) {
          CopyFromTo(*dst[dev_id], dst[i], priority);
        }
      }
    } else {
      auto& buf = merge_buf_[key];
      CopyFromTo(src, &buf.merged, priority);
      for (auto d : dst) {
        CopyFromTo(buf.merged, d, priority);
      }
    }
  }

  /// \brief broadcast a row-sparse src, either by full copy (use_copy) or by
  /// retaining only the rows listed in each destination's row_id array.
  void BroadcastRowSparse(int key, const NDArray& src,
                          const std::vector<std::pair<NDArray*, NDArray>>& dst,
                          const bool use_copy,
                          const int priority) override {
    CHECK_EQ(src.storage_type(), kRowSparseStorage)
      << "BroadcastRowSparse expects row-sparse src NDArray";

    for (size_t i = 0; i < dst.size(); ++i) {
      NDArray* out = dst[i].first;
      NDArray row_id = dst[i].second;
      if (use_copy) {
        CopyFromTo(src, out, priority);
      } else {
        CHECK_EQ(out->storage_type(), kRowSparseStorage)
          << "BroadcastRowSparse expects row_sparse dst NDArray";
        const bool is_diff_ctx = out->ctx() != src.ctx();
        // when out lives on a different device, retain into a temporary on
        // src's device first, then copy the result across.
        NDArray out_gpu = is_diff_ctx?
            NDArray(kRowSparseStorage, out->shape(), src.ctx(), true,
                    out->dtype(), out->aux_types()) : *out;
        CHECK_EQ(row_id.ctx(), src.ctx())
          << "row_id and src are expected to be on the same context";
        Engine::Get()->PushAsync([=](RunContext rctx, Engine::CallbackOnComplete on_complete) {
            NDArray temp = out_gpu;
            const TBlob& indices = row_id.data();
            switch (temp.ctx().dev_mask()) {
              case cpu::kDevMask: {
                mxnet::common::SparseRetainOpForwardRspWrapper<cpu>(rctx.get_stream<cpu>(),
                    src, indices, kWriteTo, &temp);
                break;
              }
#if MXNET_USE_CUDA
              case gpu::kDevMask: {
                mxnet::common::SparseRetainOpForwardRspWrapper<gpu>(rctx.get_stream<gpu>(),
                    src, indices, kWriteTo, &temp);
                // wait for GPU operations to complete
                rctx.get_stream<gpu>()->Wait();
                break;
              }
#endif
              default: LOG(FATAL) << MXNET_GPU_NOT_ENABLED_ERROR;
            }
            on_complete();
          }, out_gpu.ctx(), {src.var(), row_id.var()}, {out_gpu.var()},
          FnProperty::kNormal, priority, PROFILER_MESSAGE("KVStoreSparseRetain"));
        if (is_diff_ctx) {
          CopyFromTo(out_gpu, out, priority);
        }
      }
    }
  }

 private:
  /// \brief try to enable direct GPU-to-GPU (peer) access between every pair
  /// of devices in devs; logs a warning matrix when some pairs cannot.
  void EnableP2P(const std::vector<Context>& devs) {
#if MXNET_USE_CUDA
    std::vector<int> gpus;
    for (const auto& d : devs) {
      if (d.dev_mask() == gpu::kDevMask) {
        gpus.push_back(d.dev_id);
      }
    }
    int n = static_cast<int>(gpus.size());
    int enabled = 0;
    std::vector<int> p2p(n*n);
    for (int i = 0; i < n; ++i) {
      cudaSetDevice(gpus[i]);
      for (int j = 0; j < n; j++) {
        int access;
        cudaDeviceCanAccessPeer(&access, gpus[i], gpus[j]);
        if (access) {
          cudaError_t e = cudaDeviceEnablePeerAccess(gpus[j], 0);
          if (e == cudaSuccess || e == cudaErrorPeerAccessAlreadyEnabled) {
            ++enabled;
            p2p[i*n+j] = 1;
          }
        }
      }
    }
    if (enabled != n*(n-1)) {
      // print warning info if not fully enabled
      LOG(WARNING) << "only " << enabled << " out of "
                   << n*(n-1) << " GPU pairs are enabled direct access. "
                   << "It may affect the performance. "
                   << "You can set MXNET_ENABLE_GPU_P2P=0 to turn it off";
      std::string access(n, '.');
      for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
          access[j] = p2p[i*n+j] ? 'v' : '.';
        }
        LOG(WARNING) << access;
      }
    }
#endif
  }

  using KeyAttrs = std::tuple<int, TShape, int, NDArrayStorageType>;

  // try to allocate buff on device evenly
  // (largest arrays are placed first, each onto the currently least-loaded
  // device, measured by the element count already assigned to it)
  void InitMergeBuffer(const std::vector<Context>& devs) {
    std::sort(sorted_key_attrs_.begin(), sorted_key_attrs_.end(), [](
              const KeyAttrs& a, const KeyAttrs& b) {
      return std::get<1>(a).Size() > std::get<1>(b).Size();
    });

    std::unordered_map<int, std::pair<Context, size_t>> ctx_info;
    for (auto d : devs) {
      ctx_info[d.dev_id] = std::make_pair(d, 0);
    }
    for (size_t i = 0; i < sorted_key_attrs_.size(); ++i) {
      const int key = std::get<0>(sorted_key_attrs_[i]);
      const TShape& shape = std::get<1>(sorted_key_attrs_[i]);
      const int type = std::get<2>(sorted_key_attrs_[i]);
      const NDArrayStorageType stype = std::get<3>(sorted_key_attrs_[i]);
      auto& buf = merge_buf_[key];
      Context ctx;
      size_t min_size = std::numeric_limits<size_t>::max();
      for (auto it = ctx_info.begin(); it != ctx_info.end(); ++it) {
        size_t size = it->second.second;
        if (size <= min_size) {
          ctx = it->second.first;
          min_size = size;
        }
      }
      if (stype == kDefaultStorage) {
        buf.merged = NDArray(shape, ctx, false, type);
      } else {
        buf.merged = NDArray(stype, shape, ctx, true, type);
      }
      ctx_info[ctx.dev_id].second += shape.Size();
    }
    inited_ = true;
  }

  std::vector<KeyAttrs> sorted_key_attrs_;
  /// \brief temporal space for pushing and pulling
  struct BufferEntry {
    /// \brief the merged value
    NDArray merged;
    /// \brief the gpu buffer
    std::vector<NDArray> copy_buf;
    /// \brief the residual buffer for gradient compression
    std::vector<NDArray> residual;
    /// \brief the small buffer for compressed data in sender
    std::vector<NDArray> compressed_send_buf;
    /// \brief the small buffer for compressed data in receiver
    std::vector<NDArray> compressed_recv_buf;
  };
  std::unordered_map<int, BufferEntry> merge_buf_;
  bool inited_;
};

}  // namespace kvstore
}  // namespace mxnet
#endif  // MXNET_KVSTORE_COMM_H_
Parallel.c
#include <omp.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>

/* last[]/connector[] below are fixed-size; cap the thread count to match. */
enum { MAX_THREADS = 255 };

void printAll(int32_t* a, int32_t* b, int n);

/*
 * Parallel exclusive prefix sum: b[0] = 0, b[i] = a[0] + ... + a[i-1].
 *
 * Each thread scans its contiguous slice of a[] locally, publishes the
 * slice total in last[], then (after a barrier) adds the sum of all
 * preceding slice totals (connector[]) to every element of its slice.
 *
 * Usage: ./Parallel n t   (n = array length, t = thread count)
 *
 * Fixes over the original:
 *  - b[offset] was never written for threads other than 0, so last[] and
 *    the final pass read an uninitialized value (UB, garbage results).
 *  - b was printed before being computed while still uninitialized
 *    (now allocated with calloc).
 *  - the n % t tail elements were never computed (last thread now takes
 *    the remainder).
 *  - unchecked malloc; t > 255 overflowed last[]/connector[];
 *    size_t passed to %ld.
 */
int main(int argc, char* argv[]) {
  if (argc != 3) {
    fprintf(stderr, "arg missing! ./Parallel n t\n");
    return EXIT_FAILURE;
  }
  int n = atoi(argv[1]);
  int t = atoi(argv[2]);
  if (n <= 0 || t <= 0 || t > MAX_THREADS) {
    fprintf(stderr, "need n > 0 and 0 < t <= %d\n", MAX_THREADS);
    return EXIT_FAILURE;
  }
  if (t > n) t = n; /* never request more threads than elements */

  printf("%d\n", n);
  int32_t* a = malloc(sizeof(int32_t) * (size_t)n);
  /* calloc: b must hold defined values for the first printAll below */
  int32_t* b = calloc((size_t)n, sizeof(int32_t));
  if (a == NULL || b == NULL) {
    fprintf(stderr, "allocation of %d elements failed\n", n);
    free(a);
    free(b);
    return EXIT_FAILURE;
  }
  for (int i = 0; i < n; i++) a[i] = i;
  printf("using %ld MB\n", (long)(sizeof(int32_t) * (size_t)n * 2 / 1000000));
  printAll(a, b, n);

  int last[MAX_THREADS];            /* slice total of each thread        */
  int connector[MAX_THREADS] = {0}; /* offset to add to each slice       */
  printf("Threads %d\n ", t);
#pragma omp parallel num_threads(t)
  {
    int t_nums = omp_get_num_threads();
    int t_id = omp_get_thread_num();
    int ops = n / t_nums; /* elements per thread */
    int offset = ops * t_id;
    /* the last thread also takes the n % t_nums leftover elements */
    int end = (t_id == t_nums - 1) ? n : offset + ops;

    b[offset] = 0; /* local exclusive scan starts at zero (was left
                      uninitialized for every thread but 0) */
    for (int i = offset + 1; i < end; i++)
      b[i] = b[i - 1] + a[i - 1];
    last[t_id] = b[end - 1] + a[end - 1]; /* total of this slice */
#pragma omp barrier
    for (int i = 0; i < t_id; i++) /* own offset = sum of earlier slices */
      connector[t_id] += last[i];
#pragma omp barrier
    for (int i = offset; i < end; i++)
      b[i] += connector[t_id];
  }
  printAll(a, b, n);
  free(b);
  free(a);
  return EXIT_SUCCESS;
}

/* Print both arrays, or just the last prefix value when n is large. */
void printAll(int32_t* a, int32_t* b, int n) {
  if (n > 200) {
    puts("skipping print");
    printf("LAST VALUE:%d\n", b[n - 1]);
    return;
  }
  puts("");
  printf("b:");
  for (int i = 0; i < n; i++) printf(" %d", b[i]);
  puts("");
  printf("a:");
  for (int i = 0; i < n; i++) printf(" %d", a[i]);
  puts("");
}
nontemporal-1.c
/* { dg-do compile } */ /* { dg-additional-options "-O2" } */ #define N 1024 int a[N], b[N], c[N], d[N]; void foo (void) { int i; #pragma omp simd nontemporal (a, b) for (i = 0; i < N; ++i) a[i] = b[i] + c[i]; #pragma omp simd nontemporal (d) for (i = 0; i < N; ++i) d[i] = 2 * c[i]; }
atomic_utilities.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Riccardo Rossi
//                   Denis Demidov
//

#if !defined(KRATOS_ATOMIC_UTILITIES_H_INCLUDED )
#define KRATOS_ATOMIC_UTILITIES_H_INCLUDED

// System includes

// External includes
#ifdef KRATOS_SMP_OPENMP
#include <omp.h>
#endif

// Project includes
#include "includes/define.h"

namespace Kratos
{
///@addtogroup KratosCore

/**
 * collection of utilities for atomic updates of simple types. (essentially mimics the omp atomic)
 */

/** Atomically performs target += value on a scalar.
 * @param target variable being atomically updated by doing target += value
 * @param value value being added
 */
template<class TDataType>
inline void AtomicAdd(TDataType& target, const TDataType& value )
{
    #pragma omp atomic
    target += value;
}

/** Component-wise target += value on vector-like types.
 * @param target vector variable being atomically updated by doing target += value
 * @param value vector value being added
 * Note that the update is not really atomic, but rather is done component by component
 */
template<class TVectorType1, class TVectorType2>
inline void AtomicAdd(TVectorType1& target, const TVectorType2& value )
{
    KRATOS_DEBUG_ERROR_IF(target.size() != value.size())
        << "vector size mismatch in vector AtomicAdd- Sizes are: "
        << target.size() << " for target and "
        << value.size() << " for value " <<std::endl;

    for(unsigned int i=0; i<target.size(); ++i){
        AtomicAdd(target[i], value[i]);
    }
}

/** Atomically performs target -= value on a scalar.
 * @param target variable being atomically updated by doing target -= value
 * @param value value being subtracted
 */
template<class TDataType>
inline void AtomicSub(TDataType& target, const TDataType& value )
{
    #pragma omp atomic
    target -= value;
}

/** Component-wise target -= value on vector-like types.
 * @param target vector variable being atomically updated by doing target -= value
 * @param value vector value being subtracted
 * Note that the update is not really atomic, but rather is done component by component
 */
template<class TVectorType1, class TVectorType2>
inline void AtomicSub(TVectorType1& target, const TVectorType2& value )
{
    KRATOS_DEBUG_ERROR_IF(target.size() != value.size())
        << "vector size mismatch in vector AtomicSub- Sizes are: "
        << target.size() << " for target and "
        << value.size() << " for value " <<std::endl;

    for(unsigned int i=0; i<target.size(); ++i){
        AtomicSub(target[i], value[i]);
    }
}

/** Atomically performs target *= value on a scalar.
 * @param target variable being atomically updated by doing target *= value
 * @param value value being multiplied
 */
template<class TDataType>
inline void AtomicMult(TDataType& target, const TDataType& value)
{
    #pragma omp atomic
    target *= value;
}

}  // namespace Kratos.

#endif // KRATOS_ATOMIC_UTILITIES_H_INCLUDED  defined
GB_binop__bxnor_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bxnor_int8 // A.*B function (eWiseMult): GB_AemultB__bxnor_int8 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__bxnor_int8 // C+=b function (dense accum): GB_Cdense_accumb__bxnor_int8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bxnor_int8 // C=scalar+B GB_bind1st__bxnor_int8 // C=scalar+B' GB_bind1st_tran__bxnor_int8 // C=A+scalar GB_bind2nd__bxnor_int8 // C=A'+scalar GB_bind2nd_tran__bxnor_int8 // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = ~((aij) ^ (bij)) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax 
[pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = ~((x) ^ (y)) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXNOR || GxB_NO_INT8 || GxB_NO_BXNOR_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bxnor_int8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bxnor_int8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bxnor_int8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool 
A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__bxnor_int8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bxnor_int8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT 
TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bxnor_int8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t bij = Bx [p] ; Cx [p] = ~((x) ^ (bij)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bxnor_int8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; Cx [p] = ~((aij) ^ (y)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx 
[pC] = ~((x) ^ (aij)) ; \ } GrB_Info GB_bind1st_tran__bxnor_int8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = ~((aij) ^ (y)) ; \ } GrB_Info GB_bind2nd_tran__bxnor_int8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
2465a791ab986a73ea2bfecf8a7f75e065006167.c
#define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "omp.h"

/* Flattened array descriptor: data plus per-dimension size/padding metadata.
   Only data and size[] are read by padfunc below. */
struct dataobj
{
  void *restrict data;
  int * size;
  int * npsize;
  int * dsize;
  int * hsize;
  int * hofs;
  int * oofs;
} ;

/* Wall-clock accumulator for the single timed section. */
struct profiler
{
  double section0;
} ;

/* Fill the absorbing-boundary (halo) layers of the 3-D field phi by copying
   fixed interior planes outward along each axis in turn.
   NOTE(review): auto-generated OpenMP-offload code (Devito-style); the
   constants +2 (halo offset), [12] and [x_M - 8] etc. are baked-in by the
   generator for a specific padding width — do not hand-tune without
   regenerating.  Returns 0 unconditionally; elapsed time of the copy section
   is accumulated into timers->section0. */
int padfunc(struct dataobj *restrict phi_vec, const int x_M, const int y_M, const int z_M, const int abc_x_l_ltkn, const int abc_x_r_rtkn, const int abc_y_l_ltkn, const int abc_y_r_rtkn, const int abc_z_l_ltkn, const int abc_z_r_rtkn, struct profiler * timers, const int x_m, const int y_m, const int z_m)
{
  /* Reinterpret the flat buffer as a VLA-typed 3-D array using runtime sizes. */
  float (*restrict phi)[phi_vec->size[1]][phi_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[phi_vec->size[1]][phi_vec->size[2]]) phi_vec->data;

  /* Copy the whole field to the target device up front. */
  #pragma omp target enter data map(to: phi[0:phi_vec->size[0]][0:phi_vec->size[1]][0:phi_vec->size[2]])

  struct timeval start_section0, end_section0;
  gettimeofday(&start_section0, NULL);
  /* Begin section0 */
  /* x-axis left boundary: replicate the interior plane phi[12] into the
     abc_x_l_ltkn leftmost layers. */
  for (int abc_x_l = x_m; abc_x_l <= abc_x_l_ltkn + x_m - 1; abc_x_l += 1)
  {
    #pragma omp target teams distribute parallel for collapse(2)
    for (int y = y_m; y <= y_M; y += 1)
    {
      for (int z = z_m; z <= z_M; z += 1)
      {
        phi[abc_x_l + 2][y + 2][z + 2] = phi[12][y + 2][z + 2];
      }
    }
  }
  /* x-axis right boundary: replicate plane phi[x_M - 8]. */
  for (int abc_x_r = -abc_x_r_rtkn + x_M + 1; abc_x_r <= x_M; abc_x_r += 1)
  {
    #pragma omp target teams distribute parallel for collapse(2)
    for (int y = y_m; y <= y_M; y += 1)
    {
      for (int z = z_m; z <= z_M; z += 1)
      {
        phi[abc_x_r + 2][y + 2][z + 2] = phi[x_M - 8][y + 2][z + 2];
      }
    }
  }
  /* y- and z-axis boundaries, parallelized over x only. */
  #pragma omp target teams distribute parallel for collapse(1)
  for (int x = x_m; x <= x_M; x += 1)
  {
    for (int abc_y_l = y_m; abc_y_l <= abc_y_l_ltkn + y_m - 1; abc_y_l += 1)
    {
      for (int z = z_m; z <= z_M; z += 1)
      {
        phi[x + 2][abc_y_l + 2][z + 2] = phi[x + 2][12][z + 2];
      }
    }
    for (int abc_y_r = -abc_y_r_rtkn + y_M + 1; abc_y_r <= y_M; abc_y_r += 1)
    {
      for (int z = z_m; z <= z_M; z += 1)
      {
        phi[x + 2][abc_y_r + 2][z + 2] = phi[x + 2][y_M - 8][z + 2];
      }
    }
    for (int y = y_m; y <= y_M; y += 1)
    {
      for (int abc_z_l = z_m; abc_z_l <= abc_z_l_ltkn + z_m - 1; abc_z_l += 1)
      {
        phi[x + 2][y + 2][abc_z_l + 2] = phi[x + 2][y + 2][12];
      }
      for (int abc_z_r = -abc_z_r_rtkn + z_M + 1; abc_z_r <= z_M; abc_z_r += 1)
      {
        phi[x + 2][y + 2][abc_z_r + 2] = phi[x + 2][y + 2][z_M - 8];
      }
    }
  }
  /* End section0 */
  gettimeofday(&end_section0, NULL);
  /* Accumulate elapsed seconds (sec + usec) into the profiler. */
  timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;

  /* Copy the padded field back to the host, then drop the device mapping. */
  #pragma omp target update from(phi[0:phi_vec->size[0]][0:phi_vec->size[1]][0:phi_vec->size[2]])
  #pragma omp target exit data map(release: phi[0:phi_vec->size[0]][0:phi_vec->size[1]][0:phi_vec->size[2]])
  return 0;
}
myFunc.h
//#define __declspec(x) // Rob Farber #include <stdlib.h> #include <string.h> #include <stdint.h> #include <malloc.h> #include <math.h> //#include <omp.h> #include <cilk/cilk.h> #define MIC_DEV 0 #define ALLOC alloc_if(1) free_if(0) #define FREE alloc_if(0) free_if(1) #define REUSE alloc_if(0) free_if(0) // Use a struct to pass and get data from the objective function typedef struct userData { // Data information int nExamples; __declspec(align(64)) float * restrict example; __declspec(align(64)) float * restrict param; // Timing information int isWarmup; double timeObjFunc; int countObjFunc; double timeDataLoad; double minTime, maxTime; } userData_t; // function to measure wall clock time inline double getTime() { return(omp_get_wtime());} #pragma offload_attribute (push, target (mic)) // helper macros to index into the example array #define IN(i,nExamples,j) (i*nExamples+j) #define OUT(i,nExamples,j) ((i+N_INPUT)*nExamples+j) // Define the Sigmoid #ifdef USE_LINEAR char *desc="generated_PCA_func LINEAR()"; inline float G(float x) { return( x ) ;} #define G_ESTIMATE 0 #elif USE_TANH char *desc="generated_func tanh()"; inline float G(float x) { return( tanhf(x) ) ;} #define G_ESTIMATE 7 // estimate 7 flops for G #elif LOGISTIC char *desc="generated func logistic()"; inline float G(float x) { return( 1.f/(1.f+expf(-x)) ) ;} #define G_ESTIMATE 7 // estimate flops for G #else // Use Elliott function char *desc="generated func Eliott activation: x/(1+fabsf(x))"; inline float G(float x) { return( x/(1.f+fabsf(x)) ) ;} #define G_ESTIMATE 3 // estimate flops for G #endif // This file defines the function to be evaluated #include "fcn.h" double _bigloop( float * restrict example, float * restrict param, int nExamples) { double err=0.; // initialize error here in case offload selected #pragma omp parallel for reduction(+ : err) for(int i=0; i < nExamples; i++) { float d=myFunc(i, param, example, nExamples, NULL); err += d*d; } return err; } // The offload objective function 
double _objFunc(unsigned int n, const double * restrict x, double * restrict grad, void * restrict my_func_data) { double err; userData_t *uData = (userData_t *) my_func_data; // convert from double to float for speed for(int i=0; i < N_PARAM; i++) uData->param[i]=x[i]; int nExamples = uData->nExamples; __declspec(align(64)) float * restrict example = uData->example; // compiler workaround __declspec(align(64)) float * restrict param = uData->param; // compiler workaround #pragma offload target(mic:MIC_DEV) in(param:length(N_PARAM) REUSE) out(err) in(example:length(0) REUSE) #ifdef USE_OLD_COMPILER { err=0.; // initialize error here in case offload selected #pragma omp parallel for reduction(+ : err) for(int i=0; i < nExamples; i++) { float d=myFunc(i, param, example, nExamples, NULL); err += d*d; } } #else err = _bigloop(example, param, nExamples); #endif return sqrt(err); } #pragma offload_attribute (pop) // The optizimation library callable objective function that gathers timing information double objFunc(unsigned int n, const double * restrict x, double * restrict grad, void * restrict my_func_data) { if(grad) { fprintf(stderr,"Gradient not implemented!\n"); exit(1); } userData_t *uData = (userData_t *) my_func_data; double runTime=getTime(); double err = _objFunc(n,x,grad,my_func_data); runTime = getTime() - runTime; if(!uData->isWarmup) { // Note a maxTime of zero means this is the first call if(uData->maxTime == 0.) { uData->maxTime = uData->minTime = runTime; } uData->maxTime = (uData->maxTime > runTime)?uData->maxTime:runTime; uData->minTime = (uData->minTime < runTime)?uData->minTime:runTime; uData->timeObjFunc += runTime; uData->countObjFunc++; } return( err ); } // Called to free memory and report timing information void fini(userData_t *uData) { int nThreads=0; // Intel recommended way to get the number of threads in offload mode. 
#pragma offload target(mic:MIC_DEV) out(nThreads) { #pragma omp parallel { #pragma omp single { nThreads = omp_get_num_threads(); } } } // Ouput some information if(!uData->isWarmup) { printf("number OMP threads %d\n", nThreads); printf("DataLoadTime %g\n", uData->timeDataLoad); printf("AveObjTime %g, countObjFunc %d, totalObjTime %g\n", uData->timeObjFunc/uData->countObjFunc, uData->countObjFunc, uData->timeObjFunc); #ifdef FLOP_ESTIMATE printf("Estimated flops in myFunc %d, estimated average GFlop/s %g\n", FLOP_ESTIMATE, (((double)uData->nExamples*FLOP_ESTIMATE)/(uData->timeObjFunc/uData->countObjFunc)/1.e9) ); printf("Estimated maximum GFlop/s %g, minimum GFLop/s %g\n", (((double)uData->nExamples*FLOP_ESTIMATE)/(uData->minTime)/1.e9), (((double)uData->nExamples*FLOP_ESTIMATE)/(uData->maxTime)/1.e9) ); } #endif // free if using offload mode __declspec(align(64)) float * restrict example = uData->example;// compiler workaround __declspec(align(64)) float * restrict param = uData->param;// compiler workaround #pragma offload target(mic:MIC_DEV) in(example: length(0) FREE) in(param : length(0) FREE) {} // free on the host if(uData->example) free(uData->example); uData->example=NULL; if(uData->param) free(uData->param); uData->param=NULL; } void offloadData(userData_t *uData) { #ifdef __INTEL_OFFLOAD int nDevices =_Offload_number_of_devices(); if(nDevices == 0) { fprintf(stderr,"No devices found!\n"); exit -1; } // If necessary, perform offload transfer and allocation double startOffload=getTime(); __declspec(align(64)) float * restrict example = uData->example; // compiler workaround __declspec(align(64)) float * restrict param = uData->param; // compiler workaround int Xsiz = uData->nExamples*EXAMPLE_SIZE; // compiler workaround // Note: the in for param just allocates memory on the device #pragma offload target(mic:MIC_DEV) in(example: length(Xsiz) ALLOC) in(param : length(N_PARAM) ALLOC) {} // set data load time if using offload mode uData->timeDataLoad = 
getTime() - startOffload; #endif } // loads the binary file of the form: // nInput, nOutput, nExamples // Input [0] [0:nExamples] // Input [1] [0:nExamples] // ... // Output [0] [0:nExamples] // Output [1] [0:nExamples] // ... void init(char*filename, userData_t *uData) { FILE *fn=stdin; // check if reading from stdin if(strcmp("-", filename) != 0) fn=fopen(filename,"r"); if(!fn) { fprintf(stderr,"Cannot open %s\n",filename); exit(1); } // read the header information double startTime=getTime(); int32_t nInput, nOutput; int32_t nExamples; fread(&nInput,sizeof(int32_t), 1, fn); if(nInput != N_INPUT) { fprintf(stderr,"Number of inputs incorrect!\n"); exit(1); } fread(&nOutput,sizeof(int32_t), 1, fn); if(nOutput != N_OUTPUT) { fprintf(stderr,"Number of outputs incorrect!\n"); exit(1); } fread(&nExamples,sizeof(int32_t), 1, fn); if(nExamples <= 0) { fprintf(stderr,"Number of examples incorrect!\n"); exit(1); } uData->nExamples = nExamples; // aligned allocation of the data uData->example=(float*) memalign(64,nExamples*EXAMPLE_SIZE*sizeof(float)); if(!uData->example) { fprintf(stderr,"Not enough memory for examples!\n"); exit(1); } // aligned allocation of the on-device parameters uData->param=(float*) memalign(64,N_PARAM*sizeof(float)); if(!uData->param) { fprintf(stderr,"Not enough memory for the parameters!\n"); exit(1); } // read the data for(int exIndex=0; exIndex < uData->nExamples; exIndex++) { for(int i=0; i < nInput; i++) fread(&uData->example[IN(i,uData->nExamples, exIndex)],1, sizeof(float), fn); for(int i=0; i < nOutput; i++) fread(&uData->example[OUT(i,uData->nExamples, exIndex)],1, sizeof(float), fn); } // offload the data double startOffload=getTime(); __declspec(align(64)) float * restrict example = uData->example; // compiler workaround __declspec(align(64)) float * restrict param = uData->param; // compiler workaround int Xsiz = uData->nExamples*EXAMPLE_SIZE; // compiler workaround // Note: the in just allocates memory on the device #pragma offload 
target(mic:MIC_DEV) in(example: length(Xsiz) ALLOC) in(param : length(N_PARAM) ALLOC) {} uData->timeDataLoad = getTime() - startTime; if(fn!=stdin) fclose(fn); }
dds.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD DDDD SSSSS % % D D D D SS % % D D D D SSS % % D D D D SS % % DDDD DDDD SSSSS % % % % % % Read/Write Microsoft Direct Draw Surface Image Format % % % % Software Design % % Bianca van Schaik % % March 2008 % % Dirk Lemstra % % September 2013 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/module.h" #include "MagickCore/transform.h" /* Definitions */ #define DDSD_CAPS 0x00000001 #define DDSD_HEIGHT 0x00000002 #define DDSD_WIDTH 0x00000004 #define DDSD_PITCH 0x00000008 #define DDSD_PIXELFORMAT 0x00001000 #define DDSD_MIPMAPCOUNT 0x00020000 #define DDSD_LINEARSIZE 0x00080000 #define DDSD_DEPTH 0x00800000 #define DDPF_ALPHAPIXELS 0x00000001 #define DDPF_FOURCC 0x00000004 #define DDPF_RGB 0x00000040 #define DDPF_LUMINANCE 0x00020000 #define FOURCC_DXT1 0x31545844 #define FOURCC_DXT3 0x33545844 #define FOURCC_DXT5 0x35545844 #define DDSCAPS_COMPLEX 0x00000008 #define DDSCAPS_TEXTURE 0x00001000 #define DDSCAPS_MIPMAP 0x00400000 #define DDSCAPS2_CUBEMAP 0x00000200 #define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400 #define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800 #define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000 #define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000 #define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000 #define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000 #define DDSCAPS2_VOLUME 0x00200000 #ifndef SIZE_MAX #define SIZE_MAX ((size_t) -1) #endif /* Structure declarations. 
*/
/* Pixel-format sub-header of a DDS file (DDS_PIXELFORMAT): flags, FourCC
   compression tag and the RGBA channel bitmasks for uncompressed layouts. */
typedef struct _DDSPixelFormat
{
  size_t
    flags,
    fourcc,
    rgb_bitcount,
    r_bitmask,
    g_bitmask,
    b_bitmask,
    alpha_bitmask;
} DDSPixelFormat;

/* Main DDS surface header (DDS_HEADER): dimensions, pitch/linear size,
   mipmap count and capability flags, plus the embedded pixel format. */
typedef struct _DDSInfo
{
  size_t
    flags,
    height,
    width,
    pitchOrLinearSize,
    depth,
    mipmapcount,
    ddscaps1,
    ddscaps2;

  DDSPixelFormat
    pixelformat;
} DDSInfo;

/* The four RGBA palette entries of a DXT block. */
typedef struct _DDSColors
{
  unsigned char
    r[4],
    g[4],
    b[4],
    a[4];
} DDSColors;

typedef struct _DDSVector4
{
  float
    x,
    y,
    z,
    w;
} DDSVector4;

typedef struct _DDSVector3
{
  float
    x,
    y,
    z;
} DDSVector3;

/* One candidate (start,end,error) triple for single-colour compression. */
typedef struct _DDSSourceBlock
{
  unsigned char
    start,
    end,
    error;
} DDSSourceBlock;

typedef struct _DDSSingleColourLookup
{
  DDSSourceBlock sources[2];
} DDSSingleColourLookup;

typedef MagickBooleanType
  DDSDecoder(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
    ExceptionInfo *);

typedef MagickBooleanType
  DDSPixelDecoder(Image *,DDSInfo *,ExceptionInfo *);

/* Machine-generated lookup table mapping an 8-bit grey level to the best
   (start,end) pair of a 5-bit colour channel — do not edit by hand. */
static const DDSSingleColourLookup DDSLookup_5_4[] =
{
  { { { 0, 0, 0 }, { 0, 0, 0 } } },
  { { { 0, 0, 1 }, { 0, 1, 1 } } },
  { { { 0, 0, 2 }, { 0, 1, 0 } } },
  { { { 0, 0, 3 }, { 0, 1, 1 } } },
  { { { 0, 0, 4 }, { 0, 2, 1 } } },
  { { { 1, 0, 3 }, { 0, 2, 0 } } },
  { { { 1, 0, 2 }, { 0, 2, 1 } } },
  { { { 1, 0, 1 }, { 0, 3, 1 } } },
  { { { 1, 0, 0 }, { 0, 3, 0 } } },
  { { { 1, 0, 1 }, { 1, 2, 1 } } },
  { { { 1, 0, 2 }, { 1, 2, 0 } } },
  { { { 1, 0, 3 }, { 0, 4, 0 } } },
  { { { 1, 0, 4 }, { 0, 5, 1 } } },
  { { { 2, 0, 3 }, { 0, 5, 0 } } },
  { { { 2, 0, 2 }, { 0, 5, 1 } } },
  { { { 2, 0, 1 }, { 0, 6, 1 } } },
  { { { 2, 0, 0 }, { 0, 6, 0 } } },
  { { { 2, 0, 1 }, { 2, 3, 1 } } },
  { { { 2, 0, 2 }, { 2, 3, 0 } } },
  { { { 2, 0, 3 }, { 0, 7, 0 } } },
  { { { 2, 0, 4 }, { 1, 6, 1 } } },
  { { { 3, 0, 3 }, { 1, 6, 0 } } },
  { { { 3, 0, 2 }, { 0, 8, 0 } } },
  { { { 3, 0, 1 }, { 0, 9, 1 } } },
  { { { 3, 0, 0 }, { 0, 9, 0 } } },
  { { { 3, 0, 1 }, { 0, 9, 1 } } },
  { { { 3, 0, 2 }, { 0, 10, 1 } } },
  { { { 3, 0, 3 }, { 0, 10, 0 } } },
  { { { 3, 0, 4 }, { 2, 7, 1 } } },
  { { { 4, 0, 4 }, { 2, 7, 0 } } },
  { { { 4, 0, 3 }, { 0, 11, 0 } } },
  { { { 4, 0, 2 }, { 1, 10, 1
} } }, { { { 4, 0, 1 }, { 1, 10, 0 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 1 } } }, { { { 4, 0, 2 }, { 0, 13, 0 } } }, { { { 4, 0, 3 }, { 0, 13, 1 } } }, { { { 4, 0, 4 }, { 0, 14, 1 } } }, { { { 5, 0, 3 }, { 0, 14, 0 } } }, { { { 5, 0, 2 }, { 2, 11, 1 } } }, { { { 5, 0, 1 }, { 2, 11, 0 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 1, 14, 1 } } }, { { { 5, 0, 2 }, { 1, 14, 0 } } }, { { { 5, 0, 3 }, { 0, 16, 0 } } }, { { { 5, 0, 4 }, { 0, 17, 1 } } }, { { { 6, 0, 3 }, { 0, 17, 0 } } }, { { { 6, 0, 2 }, { 0, 17, 1 } } }, { { { 6, 0, 1 }, { 0, 18, 1 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 2, 15, 1 } } }, { { { 6, 0, 2 }, { 2, 15, 0 } } }, { { { 6, 0, 3 }, { 0, 19, 0 } } }, { { { 6, 0, 4 }, { 1, 18, 1 } } }, { { { 7, 0, 3 }, { 1, 18, 0 } } }, { { { 7, 0, 2 }, { 0, 20, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 2 }, { 0, 22, 1 } } }, { { { 7, 0, 3 }, { 0, 22, 0 } } }, { { { 7, 0, 4 }, { 2, 19, 1 } } }, { { { 8, 0, 4 }, { 2, 19, 0 } } }, { { { 8, 0, 3 }, { 0, 23, 0 } } }, { { { 8, 0, 2 }, { 1, 22, 1 } } }, { { { 8, 0, 1 }, { 1, 22, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 1 } } }, { { { 8, 0, 2 }, { 0, 25, 0 } } }, { { { 8, 0, 3 }, { 0, 25, 1 } } }, { { { 8, 0, 4 }, { 0, 26, 1 } } }, { { { 9, 0, 3 }, { 0, 26, 0 } } }, { { { 9, 0, 2 }, { 2, 23, 1 } } }, { { { 9, 0, 1 }, { 2, 23, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 1, 26, 1 } } }, { { { 9, 0, 2 }, { 1, 26, 0 } } }, { { { 9, 0, 3 }, { 0, 28, 0 } } }, { { { 9, 0, 4 }, { 0, 29, 1 } } }, { { { 10, 0, 3 }, { 0, 29, 0 } } }, { { { 10, 0, 2 }, { 0, 29, 1 } } }, { { { 10, 0, 1 }, { 0, 30, 1 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 2, 27, 1 } } }, { { { 10, 0, 2 }, { 2, 27, 0 } } }, { { { 10, 0, 3 }, { 0, 31, 0 } } }, { { { 10, 0, 4 }, { 1, 30, 1 } } }, { { { 11, 0, 3 }, { 1, 30, 0 } } }, { { { 11, 0, 2 }, { 4, 
24, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 0 }, { 1, 31, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 2 }, { 2, 30, 1 } } }, { { { 11, 0, 3 }, { 2, 30, 0 } } }, { { { 11, 0, 4 }, { 2, 31, 1 } } }, { { { 12, 0, 4 }, { 2, 31, 0 } } }, { { { 12, 0, 3 }, { 4, 27, 0 } } }, { { { 12, 0, 2 }, { 3, 30, 1 } } }, { { { 12, 0, 1 }, { 3, 30, 0 } } }, { { { 12, 0, 0 }, { 4, 28, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 1 } } }, { { { 12, 0, 2 }, { 3, 31, 0 } } }, { { { 12, 0, 3 }, { 3, 31, 1 } } }, { { { 12, 0, 4 }, { 4, 30, 1 } } }, { { { 13, 0, 3 }, { 4, 30, 0 } } }, { { { 13, 0, 2 }, { 6, 27, 1 } } }, { { { 13, 0, 1 }, { 6, 27, 0 } } }, { { { 13, 0, 0 }, { 4, 31, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 1 } } }, { { { 13, 0, 2 }, { 5, 30, 0 } } }, { { { 13, 0, 3 }, { 8, 24, 0 } } }, { { { 13, 0, 4 }, { 5, 31, 1 } } }, { { { 14, 0, 3 }, { 5, 31, 0 } } }, { { { 14, 0, 2 }, { 5, 31, 1 } } }, { { { 14, 0, 1 }, { 6, 30, 1 } } }, { { { 14, 0, 0 }, { 6, 30, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 1 } } }, { { { 14, 0, 2 }, { 6, 31, 0 } } }, { { { 14, 0, 3 }, { 8, 27, 0 } } }, { { { 14, 0, 4 }, { 7, 30, 1 } } }, { { { 15, 0, 3 }, { 7, 30, 0 } } }, { { { 15, 0, 2 }, { 8, 28, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 0 }, { 7, 31, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 2 }, { 8, 30, 1 } } }, { { { 15, 0, 3 }, { 8, 30, 0 } } }, { { { 15, 0, 4 }, { 10, 27, 1 } } }, { { { 16, 0, 4 }, { 10, 27, 0 } } }, { { { 16, 0, 3 }, { 8, 31, 0 } } }, { { { 16, 0, 2 }, { 9, 30, 1 } } }, { { { 16, 0, 1 }, { 9, 30, 0 } } }, { { { 16, 0, 0 }, { 12, 24, 0 } } }, { { { 16, 0, 1 }, { 9, 31, 1 } } }, { { { 16, 0, 2 }, { 9, 31, 0 } } }, { { { 16, 0, 3 }, { 9, 31, 1 } } }, { { { 16, 0, 4 }, { 10, 30, 1 } } }, { { { 17, 0, 3 }, { 10, 30, 0 } } }, { { { 17, 0, 2 }, { 10, 31, 1 } } }, { { { 17, 0, 1 }, { 10, 31, 0 } } }, { { { 17, 0, 0 }, { 12, 27, 0 } } }, { { { 17, 0, 1 }, { 11, 30, 1 } } }, { { { 17, 0, 2 }, { 11, 30, 0 } } }, { { { 17, 0, 3 }, { 12, 28, 0 } 
} }, { { { 17, 0, 4 }, { 11, 31, 1 } } }, { { { 18, 0, 3 }, { 11, 31, 0 } } }, { { { 18, 0, 2 }, { 11, 31, 1 } } }, { { { 18, 0, 1 }, { 12, 30, 1 } } }, { { { 18, 0, 0 }, { 12, 30, 0 } } }, { { { 18, 0, 1 }, { 14, 27, 1 } } }, { { { 18, 0, 2 }, { 14, 27, 0 } } }, { { { 18, 0, 3 }, { 12, 31, 0 } } }, { { { 18, 0, 4 }, { 13, 30, 1 } } }, { { { 19, 0, 3 }, { 13, 30, 0 } } }, { { { 19, 0, 2 }, { 16, 24, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 0 }, { 13, 31, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 2 }, { 14, 30, 1 } } }, { { { 19, 0, 3 }, { 14, 30, 0 } } }, { { { 19, 0, 4 }, { 14, 31, 1 } } }, { { { 20, 0, 4 }, { 14, 31, 0 } } }, { { { 20, 0, 3 }, { 16, 27, 0 } } }, { { { 20, 0, 2 }, { 15, 30, 1 } } }, { { { 20, 0, 1 }, { 15, 30, 0 } } }, { { { 20, 0, 0 }, { 16, 28, 0 } } }, { { { 20, 0, 1 }, { 15, 31, 1 } } }, { { { 20, 0, 2 }, { 15, 31, 0 } } }, { { { 20, 0, 3 }, { 15, 31, 1 } } }, { { { 20, 0, 4 }, { 16, 30, 1 } } }, { { { 21, 0, 3 }, { 16, 30, 0 } } }, { { { 21, 0, 2 }, { 18, 27, 1 } } }, { { { 21, 0, 1 }, { 18, 27, 0 } } }, { { { 21, 0, 0 }, { 16, 31, 0 } } }, { { { 21, 0, 1 }, { 17, 30, 1 } } }, { { { 21, 0, 2 }, { 17, 30, 0 } } }, { { { 21, 0, 3 }, { 20, 24, 0 } } }, { { { 21, 0, 4 }, { 17, 31, 1 } } }, { { { 22, 0, 3 }, { 17, 31, 0 } } }, { { { 22, 0, 2 }, { 17, 31, 1 } } }, { { { 22, 0, 1 }, { 18, 30, 1 } } }, { { { 22, 0, 0 }, { 18, 30, 0 } } }, { { { 22, 0, 1 }, { 18, 31, 1 } } }, { { { 22, 0, 2 }, { 18, 31, 0 } } }, { { { 22, 0, 3 }, { 20, 27, 0 } } }, { { { 22, 0, 4 }, { 19, 30, 1 } } }, { { { 23, 0, 3 }, { 19, 30, 0 } } }, { { { 23, 0, 2 }, { 20, 28, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 0 }, { 19, 31, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 2 }, { 20, 30, 1 } } }, { { { 23, 0, 3 }, { 20, 30, 0 } } }, { { { 23, 0, 4 }, { 22, 27, 1 } } }, { { { 24, 0, 4 }, { 22, 27, 0 } } }, { { { 24, 0, 3 }, { 20, 31, 0 } } }, { { { 24, 0, 2 }, { 21, 30, 1 } } }, { { { 24, 0, 1 }, { 21, 30, 0 } } 
}, { { { 24, 0, 0 }, { 24, 24, 0 } } }, { { { 24, 0, 1 }, { 21, 31, 1 } } }, { { { 24, 0, 2 }, { 21, 31, 0 } } }, { { { 24, 0, 3 }, { 21, 31, 1 } } }, { { { 24, 0, 4 }, { 22, 30, 1 } } }, { { { 25, 0, 3 }, { 22, 30, 0 } } }, { { { 25, 0, 2 }, { 22, 31, 1 } } }, { { { 25, 0, 1 }, { 22, 31, 0 } } }, { { { 25, 0, 0 }, { 24, 27, 0 } } }, { { { 25, 0, 1 }, { 23, 30, 1 } } }, { { { 25, 0, 2 }, { 23, 30, 0 } } }, { { { 25, 0, 3 }, { 24, 28, 0 } } }, { { { 25, 0, 4 }, { 23, 31, 1 } } }, { { { 26, 0, 3 }, { 23, 31, 0 } } }, { { { 26, 0, 2 }, { 23, 31, 1 } } }, { { { 26, 0, 1 }, { 24, 30, 1 } } }, { { { 26, 0, 0 }, { 24, 30, 0 } } }, { { { 26, 0, 1 }, { 26, 27, 1 } } }, { { { 26, 0, 2 }, { 26, 27, 0 } } }, { { { 26, 0, 3 }, { 24, 31, 0 } } }, { { { 26, 0, 4 }, { 25, 30, 1 } } }, { { { 27, 0, 3 }, { 25, 30, 0 } } }, { { { 27, 0, 2 }, { 28, 24, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 0 }, { 25, 31, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 2 }, { 26, 30, 1 } } }, { { { 27, 0, 3 }, { 26, 30, 0 } } }, { { { 27, 0, 4 }, { 26, 31, 1 } } }, { { { 28, 0, 4 }, { 26, 31, 0 } } }, { { { 28, 0, 3 }, { 28, 27, 0 } } }, { { { 28, 0, 2 }, { 27, 30, 1 } } }, { { { 28, 0, 1 }, { 27, 30, 0 } } }, { { { 28, 0, 0 }, { 28, 28, 0 } } }, { { { 28, 0, 1 }, { 27, 31, 1 } } }, { { { 28, 0, 2 }, { 27, 31, 0 } } }, { { { 28, 0, 3 }, { 27, 31, 1 } } }, { { { 28, 0, 4 }, { 28, 30, 1 } } }, { { { 29, 0, 3 }, { 28, 30, 0 } } }, { { { 29, 0, 2 }, { 30, 27, 1 } } }, { { { 29, 0, 1 }, { 30, 27, 0 } } }, { { { 29, 0, 0 }, { 28, 31, 0 } } }, { { { 29, 0, 1 }, { 29, 30, 1 } } }, { { { 29, 0, 2 }, { 29, 30, 0 } } }, { { { 29, 0, 3 }, { 29, 30, 1 } } }, { { { 29, 0, 4 }, { 29, 31, 1 } } }, { { { 30, 0, 3 }, { 29, 31, 0 } } }, { { { 30, 0, 2 }, { 29, 31, 1 } } }, { { { 30, 0, 1 }, { 30, 30, 1 } } }, { { { 30, 0, 0 }, { 30, 30, 0 } } }, { { { 30, 0, 1 }, { 30, 31, 1 } } }, { { { 30, 0, 2 }, { 30, 31, 0 } } }, { { { 30, 0, 3 }, { 30, 31, 1 } } }, { { { 30, 0, 4 }, { 31, 30, 1 } } 
}, { { { 31, 0, 3 }, { 31, 30, 0 } } }, { { { 31, 0, 2 }, { 31, 30, 1 } } }, { { { 31, 0, 1 }, { 31, 31, 1 } } }, { { { 31, 0, 0 }, { 31, 31, 0 } } } }; static const DDSSingleColourLookup DDSLookup_6_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 0 } } }, { { { 0, 0, 2 }, { 0, 2, 0 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 0, 4, 0 } } }, { { { 1, 0, 2 }, { 0, 5, 0 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 0, 7, 0 } } }, { { { 2, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 10, 0 } } }, { { { 3, 0, 2 }, { 0, 11, 0 } } }, { { { 4, 0, 1 }, { 0, 12, 1 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 0 } } }, { { { 4, 0, 2 }, { 0, 14, 0 } } }, { { { 5, 0, 1 }, { 0, 15, 1 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 0, 16, 0 } } }, { { { 5, 0, 2 }, { 1, 15, 0 } } }, { { { 6, 0, 1 }, { 0, 17, 0 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 0, 19, 0 } } }, { { { 6, 0, 2 }, { 3, 14, 0 } } }, { { { 7, 0, 1 }, { 0, 20, 0 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 22, 0 } } }, { { { 7, 0, 2 }, { 4, 15, 0 } } }, { { { 8, 0, 1 }, { 0, 23, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 0 } } }, { { { 8, 0, 2 }, { 6, 14, 0 } } }, { { { 9, 0, 1 }, { 0, 26, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 0, 28, 0 } } }, { { { 9, 0, 2 }, { 7, 15, 0 } } }, { { { 10, 0, 1 }, { 0, 29, 0 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 0, 31, 0 } } }, { { { 10, 0, 2 }, { 9, 14, 0 } } }, { { { 11, 0, 1 }, { 0, 32, 0 } } }, { { { 11, 0, 0 }, { 0, 33, 0 } } }, { { { 11, 0, 1 }, { 2, 30, 0 } } }, { { { 11, 0, 2 }, { 0, 34, 0 } } }, { { { 12, 0, 1 }, { 0, 35, 0 } } }, { { { 12, 0, 0 }, { 0, 36, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 0 } } }, { { { 12, 0, 2 }, { 0, 37, 0 } } }, { { { 
13, 0, 1 }, { 0, 38, 0 } } }, { { { 13, 0, 0 }, { 0, 39, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 0 } } }, { { { 13, 0, 2 }, { 0, 40, 0 } } }, { { { 14, 0, 1 }, { 0, 41, 0 } } }, { { { 14, 0, 0 }, { 0, 42, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 0 } } }, { { { 14, 0, 2 }, { 0, 43, 0 } } }, { { { 15, 0, 1 }, { 0, 44, 0 } } }, { { { 15, 0, 0 }, { 0, 45, 0 } } }, { { { 15, 0, 1 }, { 8, 30, 0 } } }, { { { 15, 0, 2 }, { 0, 46, 0 } } }, { { { 16, 0, 2 }, { 0, 47, 0 } } }, { { { 16, 0, 1 }, { 1, 46, 0 } } }, { { { 16, 0, 0 }, { 0, 48, 0 } } }, { { { 16, 0, 1 }, { 0, 49, 0 } } }, { { { 16, 0, 2 }, { 0, 50, 0 } } }, { { { 17, 0, 1 }, { 2, 47, 0 } } }, { { { 17, 0, 0 }, { 0, 51, 0 } } }, { { { 17, 0, 1 }, { 0, 52, 0 } } }, { { { 17, 0, 2 }, { 0, 53, 0 } } }, { { { 18, 0, 1 }, { 4, 46, 0 } } }, { { { 18, 0, 0 }, { 0, 54, 0 } } }, { { { 18, 0, 1 }, { 0, 55, 0 } } }, { { { 18, 0, 2 }, { 0, 56, 0 } } }, { { { 19, 0, 1 }, { 5, 47, 0 } } }, { { { 19, 0, 0 }, { 0, 57, 0 } } }, { { { 19, 0, 1 }, { 0, 58, 0 } } }, { { { 19, 0, 2 }, { 0, 59, 0 } } }, { { { 20, 0, 1 }, { 7, 46, 0 } } }, { { { 20, 0, 0 }, { 0, 60, 0 } } }, { { { 20, 0, 1 }, { 0, 61, 0 } } }, { { { 20, 0, 2 }, { 0, 62, 0 } } }, { { { 21, 0, 1 }, { 8, 47, 0 } } }, { { { 21, 0, 0 }, { 0, 63, 0 } } }, { { { 21, 0, 1 }, { 1, 62, 0 } } }, { { { 21, 0, 2 }, { 1, 63, 0 } } }, { { { 22, 0, 1 }, { 10, 46, 0 } } }, { { { 22, 0, 0 }, { 2, 62, 0 } } }, { { { 22, 0, 1 }, { 2, 63, 0 } } }, { { { 22, 0, 2 }, { 3, 62, 0 } } }, { { { 23, 0, 1 }, { 11, 47, 0 } } }, { { { 23, 0, 0 }, { 3, 63, 0 } } }, { { { 23, 0, 1 }, { 4, 62, 0 } } }, { { { 23, 0, 2 }, { 4, 63, 0 } } }, { { { 24, 0, 1 }, { 13, 46, 0 } } }, { { { 24, 0, 0 }, { 5, 62, 0 } } }, { { { 24, 0, 1 }, { 5, 63, 0 } } }, { { { 24, 0, 2 }, { 6, 62, 0 } } }, { { { 25, 0, 1 }, { 14, 47, 0 } } }, { { { 25, 0, 0 }, { 6, 63, 0 } } }, { { { 25, 0, 1 }, { 7, 62, 0 } } }, { { { 25, 0, 2 }, { 7, 63, 0 } } }, { { { 26, 0, 1 }, { 16, 45, 0 } } }, { { { 26, 0, 0 }, { 8, 62, 0 } } }, { { { 26, 0, 1 }, { 
8, 63, 0 } } }, { { { 26, 0, 2 }, { 9, 62, 0 } } }, { { { 27, 0, 1 }, { 16, 48, 0 } } }, { { { 27, 0, 0 }, { 9, 63, 0 } } }, { { { 27, 0, 1 }, { 10, 62, 0 } } }, { { { 27, 0, 2 }, { 10, 63, 0 } } }, { { { 28, 0, 1 }, { 16, 51, 0 } } }, { { { 28, 0, 0 }, { 11, 62, 0 } } }, { { { 28, 0, 1 }, { 11, 63, 0 } } }, { { { 28, 0, 2 }, { 12, 62, 0 } } }, { { { 29, 0, 1 }, { 16, 54, 0 } } }, { { { 29, 0, 0 }, { 12, 63, 0 } } }, { { { 29, 0, 1 }, { 13, 62, 0 } } }, { { { 29, 0, 2 }, { 13, 63, 0 } } }, { { { 30, 0, 1 }, { 16, 57, 0 } } }, { { { 30, 0, 0 }, { 14, 62, 0 } } }, { { { 30, 0, 1 }, { 14, 63, 0 } } }, { { { 30, 0, 2 }, { 15, 62, 0 } } }, { { { 31, 0, 1 }, { 16, 60, 0 } } }, { { { 31, 0, 0 }, { 15, 63, 0 } } }, { { { 31, 0, 1 }, { 24, 46, 0 } } }, { { { 31, 0, 2 }, { 16, 62, 0 } } }, { { { 32, 0, 2 }, { 16, 63, 0 } } }, { { { 32, 0, 1 }, { 17, 62, 0 } } }, { { { 32, 0, 0 }, { 25, 47, 0 } } }, { { { 32, 0, 1 }, { 17, 63, 0 } } }, { { { 32, 0, 2 }, { 18, 62, 0 } } }, { { { 33, 0, 1 }, { 18, 63, 0 } } }, { { { 33, 0, 0 }, { 27, 46, 0 } } }, { { { 33, 0, 1 }, { 19, 62, 0 } } }, { { { 33, 0, 2 }, { 19, 63, 0 } } }, { { { 34, 0, 1 }, { 20, 62, 0 } } }, { { { 34, 0, 0 }, { 28, 47, 0 } } }, { { { 34, 0, 1 }, { 20, 63, 0 } } }, { { { 34, 0, 2 }, { 21, 62, 0 } } }, { { { 35, 0, 1 }, { 21, 63, 0 } } }, { { { 35, 0, 0 }, { 30, 46, 0 } } }, { { { 35, 0, 1 }, { 22, 62, 0 } } }, { { { 35, 0, 2 }, { 22, 63, 0 } } }, { { { 36, 0, 1 }, { 23, 62, 0 } } }, { { { 36, 0, 0 }, { 31, 47, 0 } } }, { { { 36, 0, 1 }, { 23, 63, 0 } } }, { { { 36, 0, 2 }, { 24, 62, 0 } } }, { { { 37, 0, 1 }, { 24, 63, 0 } } }, { { { 37, 0, 0 }, { 32, 47, 0 } } }, { { { 37, 0, 1 }, { 25, 62, 0 } } }, { { { 37, 0, 2 }, { 25, 63, 0 } } }, { { { 38, 0, 1 }, { 26, 62, 0 } } }, { { { 38, 0, 0 }, { 32, 50, 0 } } }, { { { 38, 0, 1 }, { 26, 63, 0 } } }, { { { 38, 0, 2 }, { 27, 62, 0 } } }, { { { 39, 0, 1 }, { 27, 63, 0 } } }, { { { 39, 0, 0 }, { 32, 53, 0 } } }, { { { 39, 0, 1 }, { 28, 62, 0 } } }, { { { 39, 0, 2 }, { 28, 
63, 0 } } }, { { { 40, 0, 1 }, { 29, 62, 0 } } }, { { { 40, 0, 0 }, { 32, 56, 0 } } }, { { { 40, 0, 1 }, { 29, 63, 0 } } }, { { { 40, 0, 2 }, { 30, 62, 0 } } }, { { { 41, 0, 1 }, { 30, 63, 0 } } }, { { { 41, 0, 0 }, { 32, 59, 0 } } }, { { { 41, 0, 1 }, { 31, 62, 0 } } }, { { { 41, 0, 2 }, { 31, 63, 0 } } }, { { { 42, 0, 1 }, { 32, 61, 0 } } }, { { { 42, 0, 0 }, { 32, 62, 0 } } }, { { { 42, 0, 1 }, { 32, 63, 0 } } }, { { { 42, 0, 2 }, { 41, 46, 0 } } }, { { { 43, 0, 1 }, { 33, 62, 0 } } }, { { { 43, 0, 0 }, { 33, 63, 0 } } }, { { { 43, 0, 1 }, { 34, 62, 0 } } }, { { { 43, 0, 2 }, { 42, 47, 0 } } }, { { { 44, 0, 1 }, { 34, 63, 0 } } }, { { { 44, 0, 0 }, { 35, 62, 0 } } }, { { { 44, 0, 1 }, { 35, 63, 0 } } }, { { { 44, 0, 2 }, { 44, 46, 0 } } }, { { { 45, 0, 1 }, { 36, 62, 0 } } }, { { { 45, 0, 0 }, { 36, 63, 0 } } }, { { { 45, 0, 1 }, { 37, 62, 0 } } }, { { { 45, 0, 2 }, { 45, 47, 0 } } }, { { { 46, 0, 1 }, { 37, 63, 0 } } }, { { { 46, 0, 0 }, { 38, 62, 0 } } }, { { { 46, 0, 1 }, { 38, 63, 0 } } }, { { { 46, 0, 2 }, { 47, 46, 0 } } }, { { { 47, 0, 1 }, { 39, 62, 0 } } }, { { { 47, 0, 0 }, { 39, 63, 0 } } }, { { { 47, 0, 1 }, { 40, 62, 0 } } }, { { { 47, 0, 2 }, { 48, 46, 0 } } }, { { { 48, 0, 2 }, { 40, 63, 0 } } }, { { { 48, 0, 1 }, { 41, 62, 0 } } }, { { { 48, 0, 0 }, { 41, 63, 0 } } }, { { { 48, 0, 1 }, { 48, 49, 0 } } }, { { { 48, 0, 2 }, { 42, 62, 0 } } }, { { { 49, 0, 1 }, { 42, 63, 0 } } }, { { { 49, 0, 0 }, { 43, 62, 0 } } }, { { { 49, 0, 1 }, { 48, 52, 0 } } }, { { { 49, 0, 2 }, { 43, 63, 0 } } }, { { { 50, 0, 1 }, { 44, 62, 0 } } }, { { { 50, 0, 0 }, { 44, 63, 0 } } }, { { { 50, 0, 1 }, { 48, 55, 0 } } }, { { { 50, 0, 2 }, { 45, 62, 0 } } }, { { { 51, 0, 1 }, { 45, 63, 0 } } }, { { { 51, 0, 0 }, { 46, 62, 0 } } }, { { { 51, 0, 1 }, { 48, 58, 0 } } }, { { { 51, 0, 2 }, { 46, 63, 0 } } }, { { { 52, 0, 1 }, { 47, 62, 0 } } }, { { { 52, 0, 0 }, { 47, 63, 0 } } }, { { { 52, 0, 1 }, { 48, 61, 0 } } }, { { { 52, 0, 2 }, { 48, 62, 0 } } }, { { { 53, 0, 1 }, { 56, 
47, 0 } } }, { { { 53, 0, 0 }, { 48, 63, 0 } } }, { { { 53, 0, 1 }, { 49, 62, 0 } } }, { { { 53, 0, 2 }, { 49, 63, 0 } } }, { { { 54, 0, 1 }, { 58, 46, 0 } } }, { { { 54, 0, 0 }, { 50, 62, 0 } } }, { { { 54, 0, 1 }, { 50, 63, 0 } } }, { { { 54, 0, 2 }, { 51, 62, 0 } } }, { { { 55, 0, 1 }, { 59, 47, 0 } } }, { { { 55, 0, 0 }, { 51, 63, 0 } } }, { { { 55, 0, 1 }, { 52, 62, 0 } } }, { { { 55, 0, 2 }, { 52, 63, 0 } } }, { { { 56, 0, 1 }, { 61, 46, 0 } } }, { { { 56, 0, 0 }, { 53, 62, 0 } } }, { { { 56, 0, 1 }, { 53, 63, 0 } } }, { { { 56, 0, 2 }, { 54, 62, 0 } } }, { { { 57, 0, 1 }, { 62, 47, 0 } } }, { { { 57, 0, 0 }, { 54, 63, 0 } } }, { { { 57, 0, 1 }, { 55, 62, 0 } } }, { { { 57, 0, 2 }, { 55, 63, 0 } } }, { { { 58, 0, 1 }, { 56, 62, 1 } } }, { { { 58, 0, 0 }, { 56, 62, 0 } } }, { { { 58, 0, 1 }, { 56, 63, 0 } } }, { { { 58, 0, 2 }, { 57, 62, 0 } } }, { { { 59, 0, 1 }, { 57, 63, 1 } } }, { { { 59, 0, 0 }, { 57, 63, 0 } } }, { { { 59, 0, 1 }, { 58, 62, 0 } } }, { { { 59, 0, 2 }, { 58, 63, 0 } } }, { { { 60, 0, 1 }, { 59, 62, 1 } } }, { { { 60, 0, 0 }, { 59, 62, 0 } } }, { { { 60, 0, 1 }, { 59, 63, 0 } } }, { { { 60, 0, 2 }, { 60, 62, 0 } } }, { { { 61, 0, 1 }, { 60, 63, 1 } } }, { { { 61, 0, 0 }, { 60, 63, 0 } } }, { { { 61, 0, 1 }, { 61, 62, 0 } } }, { { { 61, 0, 2 }, { 61, 63, 0 } } }, { { { 62, 0, 1 }, { 62, 62, 1 } } }, { { { 62, 0, 0 }, { 62, 62, 0 } } }, { { { 62, 0, 1 }, { 62, 63, 0 } } }, { { { 62, 0, 2 }, { 63, 62, 0 } } }, { { { 63, 0, 1 }, { 63, 63, 1 } } }, { { { 63, 0, 0 }, { 63, 63, 0 } } } }; static const DDSSingleColourLookup* DDS_LOOKUP[] = { DDSLookup_5_4, DDSLookup_6_4, DDSLookup_5_4 }; /* Macros */ #define C565_r(x) (((x) & 0xF800) >> 11) #define C565_g(x) (((x) & 0x07E0) >> 5) #define C565_b(x) ((x) & 0x001F) #define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2)) #define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4)) #define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2)) #define DIV2(x) ((x) > 1 ? 
((x) >> 1) : 1) #define FixRange(min, max, steps) \ if (min > max) \ min = max; \ if ((ssize_t) max - min < steps) \ max = MagickMin(min + steps, 255); \ if ((ssize_t) max - min < steps) \ min = MagickMax(0, (ssize_t) max - steps) #define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z) #define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \ = value #define VectorInit3(vector, value) vector.x = vector.y = vector.z = value #define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \ g && mask.b_bitmask == b && mask.alpha_bitmask == a) /* Forward declarations */ /* Forward declarations */ static MagickBooleanType ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3, DDSVector4 *, DDSVector4 *, unsigned char *, size_t), ReadDDSInfo(Image *,DDSInfo *), ReadDXT1(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType, ExceptionInfo *), ReadDXT3(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType, ExceptionInfo *), ReadDXT5(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType, ExceptionInfo *), ReadUncompressedRGB(const ImageInfo *,Image *,DDSInfo *, const MagickBooleanType,ExceptionInfo *), ReadUncompressedRGBA(const ImageInfo *,Image *,DDSInfo *, const MagickBooleanType,ExceptionInfo *), SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *), SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *), WriteDDSImage(const ImageInfo *,Image *,ExceptionInfo *), WriteMipmaps(Image *,const ImageInfo*,const size_t,const size_t,const size_t, const MagickBooleanType,const MagickBooleanType,const MagickBooleanType, ExceptionInfo *); static void RemapIndices(const ssize_t *,const unsigned char *,unsigned char *), WriteDDSInfo(Image *,const size_t,const size_t,const size_t), WriteFourCC(Image *,const size_t,const MagickBooleanType, const MagickBooleanType,ExceptionInfo *), WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType, const MagickBooleanType,ExceptionInfo *), 
  WriteIndices(Image *,const DDSVector3,const DDSVector3,unsigned char *),
  WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *),
  WriteUncompressed(Image *,ExceptionInfo *);

/* Component-wise sum: *destination = left + right. */
static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right,
  DDSVector4 *destination)
{
  destination->x = left.x + right.x;
  destination->y = left.y + right.y;
  destination->z = left.z + right.z;
  destination->w = left.w + right.w;
}

/* Clamp every component of a 4-vector into [0,1]. */
static inline void VectorClamp(DDSVector4 *value)
{
  value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
  value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
  value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
  value->w = MagickMin(1.0f,MagickMax(0.0f,value->w));
}

/* Clamp every component of a 3-vector into [0,1]. */
static inline void VectorClamp3(DDSVector3 *value)
{
  value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
  value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
  value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
}

/* Copy the x/y/z components of a 4-vector into a 3-vector (w is dropped). */
static inline void VectorCopy43(const DDSVector4 source,
  DDSVector3 *destination)
{
  destination->x = source.x;
  destination->y = source.y;
  destination->z = source.z;
}

/* Copy all four components of a 4-vector. */
static inline void VectorCopy44(const DDSVector4 source,
  DDSVector4 *destination)
{
  destination->x = source.x;
  destination->y = source.y;
  destination->z = source.z;
  destination->w = source.w;
}

/* Negative multiply-subtract: *destination = c - a*b (per component). */
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
  const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination)
{
  destination->x = c.x - (a.x * b.x);
  destination->y = c.y - (a.y * b.y);
  destination->z = c.z - (a.z * b.z);
  destination->w = c.w - (a.w * b.w);
}

/* Component-wise product of two 4-vectors. */
static inline void VectorMultiply(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  destination->x = left.x * right.x;
  destination->y = left.y * right.y;
  destination->z = left.z * right.z;
  destination->w = left.w * right.w;
}

/* Component-wise product of two 3-vectors. */
static inline void VectorMultiply3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  destination->x = left.x * right.x;
  destination->y = left.y * right.y;
  destination->z = left.z * right.z;
}

/* Multiply-add: *destination = a*b + c (per component). */
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
  const DDSVector4 c, DDSVector4 *destination)
{
  destination->x = (a.x * b.x) + c.x;
  destination->y = (a.y * b.y) + c.y;
  destination->z = (a.z * b.z) + c.z;
  destination->w = (a.w * b.w) + c.w;
}

/* Multiply-add for 3-vectors: *destination = a*b + c. */
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
  const DDSVector3 c, DDSVector3 *destination)
{
  destination->x = (a.x * b.x) + c.x;
  destination->y = (a.y * b.y) + c.y;
  destination->z = (a.z * b.z) + c.z;
}

/* Component-wise reciprocal: *destination = 1/value. */
static inline void VectorReciprocal(const DDSVector4 value,
  DDSVector4 *destination)
{
  destination->x = 1.0f / value.x;
  destination->y = 1.0f / value.y;
  destination->z = 1.0f / value.z;
  destination->w = 1.0f / value.w;
}

/* Component-wise difference: *destination = left - right. */
static inline void VectorSubtract(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  destination->x = left.x - right.x;
  destination->y = left.y - right.y;
  destination->z = left.z - right.z;
  destination->w = left.w - right.w;
}

/* Component-wise difference of two 3-vectors. */
static inline void VectorSubtract3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  destination->x = left.x - right.x;
  destination->y = left.y - right.y;
  destination->z = left.z - right.z;
}

/* Truncate each component toward zero. */
static inline void VectorTruncate(DDSVector4 *value)
{
  value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
  value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
  value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
  value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w);
}

/* Truncate each component of a 3-vector toward zero. */
static inline void VectorTruncate3(DDSVector3 *value)
{
  value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
  value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
  value->z = value->z > 0.0f ?
floor(value->z) : ceil(value->z);
}

/*
  Expand the two 16-bit 5:6:5 endpoint colours of a DXT block to 8-bit RGB
  and derive the two interpolated palette entries.  When ignoreAlpha is
  MagickFalse and c0 <= c1 the block is in three-colour mode: the fourth
  palette entry is black with a[3] = 255 (the transparent marker checked by
  SetDXT1Pixels).
*/
static void CalculateColors(unsigned short c0, unsigned short c1,
  DDSColors *c, MagickBooleanType ignoreAlpha)
{
  c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0;

  c->r[0] = (unsigned char) C565_red(c0);
  c->g[0] = (unsigned char) C565_green(c0);
  c->b[0] = (unsigned char) C565_blue(c0);

  c->r[1] = (unsigned char) C565_red(c1);
  c->g[1] = (unsigned char) C565_green(c1);
  c->b[1] = (unsigned char) C565_blue(c1);

  if (ignoreAlpha != MagickFalse || c0 > c1)
    {
      /* Four-colour mode: 2/3-1/3 interpolation between the endpoints. */
      c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
      c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
      c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);

      c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
      c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
      c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
    }
  else
    {
      /* Three-colour mode: midpoint plus a transparent fourth entry. */
      c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
      c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
      c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);

      c->r[3] = c->g[3] = c->b[3] = 0;
      c->a[3] = 255;
    }
}

/*
  Quantize the 16 block alphas against the 8-entry palette implied by
  min/max and the interpolation step count (5 or 7), writing one palette
  index per texel into indices[] and returning the accumulated squared
  error.  An alpha of -1 marks a texel outside the image; it maps to
  index 0 with no error contribution.
*/
static size_t CompressAlpha(const size_t min, const size_t max,
  const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
  unsigned char
    codes[8];

  register ssize_t
    i;

  size_t
    error,
    index,
    j,
    least,
    value;

  codes[0] = (unsigned char) min;
  codes[1] = (unsigned char) max;
  codes[6] = 0;
  codes[7] = 255;

  /* Interpolated entries; for steps == 5 the 0/255 entries above remain. */
  for (i=1; i < (ssize_t) steps; i++)
    codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps);

  error = 0;
  for (i=0; i<16; i++)
  {
    if (alphas[i] == -1)
      {
        indices[i] = 0;
        continue;
      }

    value = alphas[i];
    least = SIZE_MAX;
    index = 0;
    for (j=0; j<8; j++)
    {
      size_t
        dist;

      /* Unsigned wrap-around cancels out once the difference is squared. */
      dist = value - (size_t)codes[j];
      dist *= dist;

      if (dist < least)
        {
          least = dist;
          index = j;
        }
    }

    indices[i] = (unsigned char)index;
    error += least;
  }

  return error;
}

/*
  Iterative cluster-fit endpoint search for a DXT colour block: points are
  ordered along the current axis (initially the principal component), every
  partition of that ordering into the four palette buckets is evaluated
  against the least-squares error metric, and the best endpoints found are
  re-used as the axis for the next iteration (up to 8, or until the
  ordering repeats).  The winning bucket assignment is remapped through
  map[] into the 2-bit indices[].
*/
static void CompressClusterFit(const size_t count, const DDSVector4 *points,
  const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric,
  DDSVector3 *start, DDSVector3 *end, unsigned char *indices)
{
  DDSVector3
    axis;

  DDSVector4
    grid,
    gridrcp,
    half,
    onethird_onethird2,
    pointsWeights[16],
    two,
    twonineths,
    twothirds_twothirds2,
    xSumwSum;

  float
    bestError = 1e+37f;

  size_t
    bestIteration = 0,
    besti = 0,
    bestj = 0,
    bestk = 0,
    iterationIndex;

  ssize_t
    i;

  unsigned char
    *o,
    order[128],
    unordered[16];

  VectorInit(half,0.5f);
  VectorInit(two,2.0f);

  VectorInit(onethird_onethird2,1.0f/3.0f);
  onethird_onethird2.w = 1.0f/9.0f;
  VectorInit(twothirds_twothirds2,2.0f/3.0f);
  twothirds_twothirds2.w = 4.0f/9.0f;
  VectorInit(twonineths,2.0f/9.0f);

  /* 5:6:5 quantization grid for snapping candidate endpoints. */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  grid.w = 0.0f;

  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  gridrcp.w = 0.0f;

  xSumwSum.x = 0.0f;
  xSumwSum.y = 0.0f;
  xSumwSum.z = 0.0f;
  xSumwSum.w = 0.0f;

  ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);

  for (iterationIndex = 0;;)
  {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,1) \
    num_threads(GetMagickResourceLimit(ThreadResource))
#endif
    /* i/j/k split the ordered points into the four palette buckets. */
    for (i=0; i < (ssize_t) count; i++)
    {
      DDSVector4
        part0,
        part1,
        part2;

      size_t
        ii,
        j,
        k,
        kmin;

      VectorInit(part0,0.0f);
      for(ii=0; ii < (size_t) i; ii++)
        VectorAdd(pointsWeights[ii],part0,&part0);

      VectorInit(part1,0.0f);
      for (j=(size_t) i;;)
      {
        if (j == 0)
          {
            VectorCopy44(pointsWeights[0],&part2);
            kmin = 1;
          }
        else
          {
            VectorInit(part2,0.0f);
            kmin = j;
          }

        for (k=kmin;;)
        {
          DDSVector4
            a,
            alpha2_sum,
            alphax_sum,
            alphabeta_sum,
            b,
            beta2_sum,
            betax_sum,
            e1,
            e2,
            factor,
            part3;

          float
            error;

          /* Least-squares solve for the two endpoints of this partition. */
          VectorSubtract(xSumwSum,part2,&part3);
          VectorSubtract(part3,part1,&part3);
          VectorSubtract(part3,part0,&part3);

          VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
          VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
          VectorInit(alpha2_sum,alphax_sum.w);

          VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
          VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
          VectorInit(beta2_sum,betax_sum.w);

          VectorAdd(part1,part2,&alphabeta_sum);
          VectorInit(alphabeta_sum,alphabeta_sum.w);
          VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);

          VectorMultiply(alpha2_sum,beta2_sum,&factor);
          VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
            &factor);
          VectorReciprocal(factor,&factor);

          VectorMultiply(alphax_sum,beta2_sum,&a);
          VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
          VectorMultiply(a,factor,&a);

          VectorMultiply(betax_sum,alpha2_sum,&b);
          VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
          VectorMultiply(b,factor,&b);

          /* Snap both endpoints onto the 5:6:5 grid. */
          VectorClamp(&a);
          VectorMultiplyAdd(grid,a,half,&a);
          VectorTruncate(&a);
          VectorMultiply(a,gridrcp,&a);

          VectorClamp(&b);
          VectorMultiplyAdd(grid,b,half,&b);
          VectorTruncate(&b);
          VectorMultiply(b,gridrcp,&b);

          /* Metric-weighted squared error of the snapped endpoints. */
          VectorMultiply(b,b,&e1);
          VectorMultiply(e1,beta2_sum,&e1);
          VectorMultiply(a,a,&e2);
          VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);

          VectorMultiply(a,b,&e2);
          VectorMultiply(e2,alphabeta_sum,&e2);
          VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
          VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);
          VectorMultiplyAdd(two,e2,e1,&e2);
          VectorMultiply(e2,metric,&e2);

          error = e2.x + e2.y + e2.z;

          if (error < bestError)
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (DDS_CompressClusterFit)
#endif
              {
                /* Re-check under the critical section (double-checked). */
                if (error < bestError)
                  {
                    VectorCopy43(a,start);
                    VectorCopy43(b,end);
                    bestError = error;
                    besti = i;
                    bestj = j;
                    bestk = k;
                    bestIteration = iterationIndex;
                  }
              }
            }

          if (k == count)
            break;

          VectorAdd(pointsWeights[k],part2,&part2);
          k++;
        }

        if (j == count)
          break;

        VectorAdd(pointsWeights[j],part1,&part1);
        j++;
      }
    }

    /* Stop when this iteration produced no improvement, after 8 rounds,
       or when the re-ordering along the new axis repeats an earlier one. */
    if (bestIteration != iterationIndex)
      break;

    iterationIndex++;
    if (iterationIndex == 8)
      break;

    VectorSubtract3(*end,*start,&axis);
    if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
      iterationIndex) == MagickFalse)
      break;
  }

  /* Translate the winning i/j/k partition into per-point bucket codes. */
  o = order + (16*bestIteration);

  for (i=0; i < (ssize_t) besti; i++)
    unordered[o[i]] = 0;
  for (i=besti; i < (ssize_t) bestj; i++)
    unordered[o[i]] = 2;
  for (i=bestj; i < (ssize_t) bestk; i++)
    unordered[o[i]] = 3;
  for (i=bestk; i < (ssize_t) count; i++)
    unordered[o[i]] = 1;

  RemapIndices(map,unordered,indices);
}

/*
  Fast range-fit endpoint search: take the extreme points along the
  principal axis as endpoints, snap them to the 5:6:5 grid, build the
  four-colour palette, and assign every point to its metric-nearest
  palette entry.
*/
static void CompressRangeFit(const size_t count, const DDSVector4* points,
  const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric,
  DDSVector3 *start, DDSVector3 *end, unsigned char *indices)
{
  float
    d,
    bestDist,
    max,
    min,
    val;

  DDSVector3
    codes[4],
    grid,
    gridrcp,
    half,
    dist;

  register ssize_t
    i;

  size_t
    bestj,
    j;

  unsigned char
    closest[16];

  VectorInit3(half,0.5f);

  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;

  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;

  if (count > 0)
    {
      /* Endpoints = extreme projections onto the principal axis. */
      VectorCopy43(points[0],start);
      VectorCopy43(points[0],end);

      min = max = Dot(points[0],principle);
      for (i=1; i < (ssize_t) count; i++)
      {
        val = Dot(points[i],principle);
        if (val < min)
          {
            VectorCopy43(points[i],start);
            min = val;
          }
        else if (val > max)
          {
            VectorCopy43(points[i],end);
            max = val;
          }
      }
    }

  /* Snap both endpoints onto the 5:6:5 grid. */
  VectorClamp3(start);
  VectorMultiplyAdd3(grid,*start,half,start);
  VectorTruncate3(start);
  VectorMultiply3(*start,gridrcp,start);

  VectorClamp3(end);
  VectorMultiplyAdd3(grid,*end,half,end);
  VectorTruncate3(end);
  VectorMultiply3(*end,gridrcp,end);

  /* Four-colour palette: endpoints plus 1/3 and 2/3 interpolants. */
  codes[0] = *start;
  codes[1] = *end;
  codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
  codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
  codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
  codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
  codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
  codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));

  for (i=0; i < (ssize_t) count; i++)
  {
    bestDist = 1e+37f;
    bestj = 0;
    for (j=0; j < 4; j++)
    {
      dist.x = (points[i].x - codes[j].x) * metric.x;
      dist.y = (points[i].y - codes[j].y) * metric.y;
      dist.z = (points[i].z - codes[j].z) * metric.z;

      d = Dot(dist,dist);
      if (d < bestDist)
        {
          bestDist = d;
          bestj = j;
        }
    }

    closest[i] = (unsigned char) bestj;
  }

  RemapIndices(map, closest, indices);
}

static void ComputeEndPoints(const
  DDSSingleColourLookup *lookup[], const unsigned char *color,
  DDSVector3 *start, DDSVector3 *end, unsigned char *index)
{
  register ssize_t
    i;

  size_t
    c,
    maxError = SIZE_MAX;

  /*
    Pick, from the per-channel single-colour lookup tables, the endpoint
    pair (over the two interpolation variants i) with the smallest summed
    squared error; return the endpoints normalized to [0,1] and the
    palette index (0 or 2) that reproduces the colour.
  */
  for (i=0; i < 2; i++)
  {
    const DDSSourceBlock*
      sources[3];

    size_t
      error = 0;

    for (c=0; c < 3; c++)
    {
      sources[c] = &lookup[c][color[c]].sources[i];
      error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error);
    }

    /* maxError tracks the best (smallest) error accepted so far. */
    if (error > maxError)
      continue;

    /* 5-bit red/blue and 6-bit green endpoints, normalized to [0,1]. */
    start->x = (float) sources[0]->start / 31.0f;
    start->y = (float) sources[1]->start / 63.0f;
    start->z = (float) sources[2]->start / 31.0f;

    end->x = (float) sources[0]->end / 31.0f;
    end->y = (float) sources[1]->end / 63.0f;
    end->z = (float) sources[2]->end / 31.0f;

    *index = (unsigned char) (2*i);
    maxError = error;
  }
}

/*
  Estimate the principal axis of the symmetric 3x3 covariance matrix
  (given as its 6 upper-triangle entries) with 8 power-method iterations,
  normalizing by the largest component each step.
*/
static void ComputePrincipleComponent(const float *covariance,
  DDSVector3 *principle)
{
  DDSVector4
    row0,
    row1,
    row2,
    v;

  register ssize_t
    i;

  row0.x = covariance[0];
  row0.y = covariance[1];
  row0.z = covariance[2];
  row0.w = 0.0f;

  row1.x = covariance[1];
  row1.y = covariance[3];
  row1.z = covariance[4];
  row1.w = 0.0f;

  row2.x = covariance[2];
  row2.y = covariance[4];
  row2.z = covariance[5];
  row2.w = 0.0f;

  VectorInit(v,1.0f);

  for (i=0; i < 8; i++)
  {
    DDSVector4
      w;

    float
      a;

    /* w = covariance * v */
    w.x = row0.x * v.x;
    w.y = row0.y * v.x;
    w.z = row0.z * v.x;
    w.w = row0.w * v.x;

    w.x = (row1.x * v.y) + w.x;
    w.y = (row1.y * v.y) + w.y;
    w.z = (row1.z * v.y) + w.z;
    w.w = (row1.w * v.y) + w.w;

    w.x = (row2.x * v.z) + w.x;
    w.y = (row2.y * v.z) + w.y;
    w.z = (row2.z * v.z) + w.z;
    w.w = (row2.w * v.z) + w.w;

    /* Normalize by the largest component to avoid overflow. */
    a = (float) PerceptibleReciprocal(MagickMax(w.x,MagickMax(w.y,w.z)));

    v.x = w.x * a;
    v.y = w.y * a;
    v.z = w.z * a;
    v.w = w.w * a;
  }

  VectorCopy43(v,principle);
}

/*
  Compute the weighted 3x3 covariance (6 upper-triangle floats) of the
  points about their weighted centroid; per-point weights come from the
  w component.
*/
static void ComputeWeightedCovariance(const size_t count,
  const DDSVector4 *points, float *covariance)
{
  DDSVector3
    centroid;

  float
    total;

  size_t
    i;

  total = 0.0f;
  VectorInit3(centroid,0.0f);

  for (i=0; i < count; i++)
  {
    total += points[i].w;
    centroid.x += (points[i].x * points[i].w);
    centroid.y += (points[i].y * points[i].w);
    centroid.z += (points[i].z * points[i].w);
  }

  /* Guard against division by a (near-)zero total weight. */
  if( total > 1.192092896e-07F)
    {
      centroid.x /= total;
      centroid.y /= total;
      centroid.z /= total;
    }

  for (i=0; i < 6; i++)
    covariance[i] = 0.0f;

  for (i = 0; i < count; i++)
  {
    DDSVector3
      a,
      b;

    a.x = points[i].x - centroid.x;
    a.y = points[i].y - centroid.y;
    a.z = points[i].z - centroid.z;

    b.x = points[i].w * a.x;
    b.y = points[i].w * a.y;
    b.z = points[i].w * a.z;

    covariance[0] += a.x*b.x;
    covariance[1] += a.x*b.y;
    covariance[2] += a.x*b.z;
    covariance[3] += a.y*b.y;
    covariance[4] += a.y*b.z;
    covariance[5] += a.z*b.z;
  }
}

/*
  Sort the points by their projection onto `axis` (insertion sort on the
  dot products) and record the ordering in order[] at slot `iteration`.
  Returns MagickFalse when this ordering duplicates one from an earlier
  iteration (so cluster-fit can stop); otherwise rebuilds the weighted
  point array and the running x/w sums and returns MagickTrue.
*/
static MagickBooleanType ConstructOrdering(const size_t count,
  const DDSVector4 *points, const DDSVector3 axis,
  DDSVector4 *pointsWeights, DDSVector4 *xSumwSum, unsigned char *order,
  size_t iteration)
{
  float
    dps[16],
    f;

  register ssize_t
    i;

  size_t
    j;

  unsigned char
    c,
    *o,
    *p;

  o = order + (16*iteration);

  for (i=0; i < (ssize_t) count; i++)
  {
    dps[i] = Dot(points[i],axis);
    o[i] = (unsigned char)i;
  }

  /* Insertion sort of the projections, permuting o[] alongside. */
  for (i=0; i < (ssize_t) count; i++)
  {
    for (j=i; j > 0 && dps[j] < dps[j - 1]; j--)
    {
      f = dps[j];
      dps[j] = dps[j - 1];
      dps[j - 1] = f;

      c = o[j];
      o[j] = o[j - 1];
      o[j - 1] = c;
    }
  }

  /* Bail out if this ordering matches that of an earlier iteration. */
  for (i=0; i < (ssize_t) iteration; i++)
  {
    MagickBooleanType
      same;

    p = order + (16*i);
    same = MagickTrue;

    for (j=0; j < count; j++)
    {
      if (o[j] != p[j])
        {
          same = MagickFalse;
          break;
        }
    }

    if (same != MagickFalse)
      return MagickFalse;
  }

  xSumwSum->x = 0;
  xSumwSum->y = 0;
  xSumwSum->z = 0;
  xSumwSum->w = 0;

  for (i=0; i < (ssize_t) count; i++)
  {
    DDSVector4
      v;

    j = (size_t) o[i];

    /* Weighted point in sorted order; w carries the weight itself. */
    v.x = points[j].w * points[j].x;
    v.y = points[j].w * points[j].y;
    v.z = points[j].w * points[j].z;
    v.w = points[j].w * 1.0f;

    VectorCopy44(v,&pointsWeights[i]);
    VectorAdd(*xSumwSum,v,xSumwSum);
  }

  return MagickTrue;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s D D S                                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsDDS() returns MagickTrue if the image format type, identified by
%  the magick string, is DDS.
%
%  The format of the IsDDS method is:
%
%      MagickBooleanType IsDDS(const unsigned char *magick,const size_t length)
%
%  A description of each parameter follows:
%
%    o magick: compare image format pattern against these bytes.
%
%    o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsDDS(const unsigned char *magick,
  const size_t length)
{
  if (length < 4)
    return(MagickFalse);
  /* A DDS file starts with the four-byte magic "DDS ". */
  if (LocaleNCompare((char *) magick,"DDS ", 4) == 0)
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d D D S I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadDDSImage() reads a DirectDraw Surface image file and returns it.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  The format of the ReadDDSImage method is:
%
%      Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: The image info.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadDDSImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  const char
    *option;

  CompressionType
    compression;

  DDSInfo
    dds_info;

  DDSDecoder
    *decoder;

  Image
    *image;

  MagickBooleanType
    status,
    cubemap,
    volume,
    read_mipmaps;

  PixelTrait
    alpha_trait;

  size_t
    n,
    num_images;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cubemap=MagickFalse,
  volume=MagickFalse,
  read_mipmaps=MagickFalse;
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }

  /*
    Initialize image structure.
  */
  if (ReadDDSInfo(image, &dds_info) != MagickTrue)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");

  if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
    cubemap = MagickTrue;

  if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0)
    volume = MagickTrue;

  /* Image data starts right after the 128-byte magic + header. */
  (void) SeekBlob(image, 128, SEEK_SET);

  /*
    Determine pixel format
  */
  if (dds_info.pixelformat.flags & DDPF_RGB)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          alpha_trait = BlendPixelTrait;
          decoder = ReadUncompressedRGBA;
        }
      else
        {
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          /* Not sure how to handle this */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      else
        {
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_FOURCC)
    {
      switch (dds_info.pixelformat.fourcc)
      {
        case FOURCC_DXT1:
        {
          alpha_trait = UndefinedPixelTrait;
          compression = DXT1Compression;
          decoder = ReadDXT1;
          break;
        }
        case FOURCC_DXT3:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT3Compression;
          decoder = ReadDXT3;
          break;
        }
        case FOURCC_DXT5:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT5Compression;
          decoder = ReadDXT5;
          break;
        }
        default:
        {
          /* Unknown FOURCC */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      }
    }
  else
    {
      /* Neither compressed nor uncompressed... thus unsupported */
      ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
    }

  num_images = 1;
  if (cubemap)
    {
      /*
        Determine number of faces defined in the cubemap
      */
      num_images = 0;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
    }

  if (volume)
    num_images = dds_info.depth;

  /* Sanity-check the frame count against the actual blob size. */
  if ((num_images == 0) || (num_images > GetBlobSize(image)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");

  if (AcquireMagickResource(ListLengthResource,num_images) == MagickFalse)
    ThrowReaderException(ResourceLimitError,"ListLengthExceedsLimit");

  option=GetImageOption(image_info,"dds:skip-mipmaps");
  if (IsStringFalse(option) != MagickFalse)
    read_mipmaps=MagickTrue;

  for (n = 0; n < num_images; n++)
  {
    if (n != 0)
      {
        /* Start a new image */
        if (EOFBlob(image) != MagickFalse)
          ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
        AcquireNextImage(image_info,image,exception);
        if (GetNextImageInList(image) == (Image *) NULL)
          return(DestroyImageList(image));
        image=SyncNextImageInList(image);
      }

    image->alpha_trait=alpha_trait;
    image->compression=compression;
    image->columns=dds_info.width;
    image->rows=dds_info.height;
    image->storage_class=DirectClass;
    image->endian=LSBEndian;
    image->depth=8;
    if (image_info->ping != MagickFalse)
      {
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
    status=SetImageExtent(image,image->columns,image->rows,exception);
    if (status == MagickFalse)
      return(DestroyImageList(image));
    (void) SetImageBackgroundColor(image,exception);
    /* Decode this surface with the format-specific decoder chosen above. */
    status=(decoder)(image_info,image,&dds_info,read_mipmaps,exception);
    if (status == MagickFalse)
      {
        /* Decoding failed: return whatever has been read so far. */
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
  }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}

/*
  Parse the 124-byte DDS header (after the "DDS " magic) plus the embedded
  32-byte pixel-format structure into dds_info.  Returns MagickFalse when
  either structure size or the required DDSD_* flags do not match.
*/
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
  size_t
    hdr_size,
    required;

  /*
    Seek to start of header
  */
  (void) SeekBlob(image, 4, SEEK_SET);

  /*
    Check header field
  */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 124)
    return MagickFalse;

  /*
    Fill in DDS info struct
  */
  dds_info->flags = ReadBlobLSBLong(image);

  /*
    Check required flags
  */
  required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
  if ((dds_info->flags & required) != required)
    return MagickFalse;

  dds_info->height = ReadBlobLSBLong(image);
  dds_info->width = ReadBlobLSBLong(image);
  dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
  dds_info->depth = ReadBlobLSBLong(image);
  dds_info->mipmapcount = ReadBlobLSBLong(image);

  (void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */

  /*
    Read pixel format structure
  */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 32)
    return MagickFalse;

  dds_info->pixelformat.flags = ReadBlobLSBLong(image);
  dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
  dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
  dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);

  dds_info->ddscaps1 = ReadBlobLSBLong(image);
  dds_info->ddscaps2 = ReadBlobLSBLong(image);

  (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */

  return MagickTrue;
}

/*
  Write one decoded 4x4 DXT1 block into the pixel cache at (x,y); `bits`
  holds the sixteen 2-bit palette indices.  Returns MagickFalse when a
  texel with non-zero opacity marker is met while the image has no alpha
  channel, so the caller can enable alpha and re-run the block.
*/
static MagickBooleanType SetDXT1Pixels(Image *image,ssize_t x,ssize_t y,
  DDSColors colors,size_t bits,Quantum *q)
{
  register ssize_t
    i;

  ssize_t
    j;

  unsigned char
    code;

  for (j = 0; j < 4; j++)
  {
    for (i = 0; i < 4; i++)
    {
      /* Clip texels of partial blocks at the right/bottom image edges. */
      if ((x + i) < (ssize_t) image->columns &&
          (y + j) < (ssize_t) image->rows)
        {
          code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3);
          SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
          SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
          SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
          SetPixelOpacity(image,ScaleCharToQuantum(colors.a[code]),q);
          if ((colors.a[code] != 0) &&
              (image->alpha_trait == UndefinedPixelTrait))
            return(MagickFalse);
          q+=GetPixelChannels(image);
        }
    }
  }
  return(MagickTrue);
}

/*
  Decode the mipmap chain that follows the main surface, appending one
  image per level; mipmapcount includes the main image, and each level
  halves the dimensions (clamped at 1 by DIV2).  Applies only to plain
  textures and cube maps.
*/
static MagickBooleanType ReadMipmaps(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,DDSPixelDecoder decoder,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      register ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);

      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        AcquireNextImage(image_info,image,exception);
        if (image->next == (Image *) NULL)
          return(MagickFalse);
        image->next->alpha_trait=image->alpha_trait;
        image=SyncNextImageInList(image);
        status=SetImageExtent(image,w,h,exception);
        if (status == MagickFalse)
          break;
        status=decoder(image,dds_info,exception);
        if (status == MagickFalse)
          break;
        if ((w == 1) && (h == 1))
          break;
        w=DIV2(w);
        h=DIV2(h);
      }
    }
  return(status);
}

/*
  Decode a DXT1 (BC1) compressed surface block-by-block into the current
  image; each 8-byte block holds two 5:6:5 endpoints and sixteen 2-bit
  palette indices.
*/
static MagickBooleanType ReadDXT1Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    bits;

  ssize_t
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /*
        Get 4x4 patch of pixels to write on
      */
      q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
        MagickMin(4,image->rows-y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);

      /*
        Read 8 bytes of data from the image
      */
      c0=ReadBlobLSBShort(image);
      c1=ReadBlobLSBShort(image);
      bits=ReadBlobLSBLong(image);

      CalculateColors(c0,c1,&colors,MagickFalse);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);

      /* Write the pixels */
      if (SetDXT1Pixels(image,x,y,colors,bits,q) == MagickFalse)
        {
          /* Correct alpha: enable the channel and re-run the block. */
          SetImageAlpha(image,QuantumRange,exception);
          q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
            MagickMin(4,image->rows-y),exception);
          if (q != (Quantum *) NULL)
            SetDXT1Pixels(image,x,y,colors,bits,q);
        }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

/* Decode the main DXT1 surface, then read or skip its mipmaps. */
static MagickBooleanType ReadDXT1(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT1Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);

  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadDXT1Pixels,exception));
  else
    return(SkipDXTMipmaps(image,dds_info,8,exception));
}

/*
  Decode a DXT3 (BC2) compressed surface: each 16-byte block carries
  8 bytes of explicit 4-bit alpha followed by a DXT1-style colour block.
*/
static MagickBooleanType ReadDXT3Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  register Quantum
    *q;

  register ssize_t
    i,
    x;

  unsigned char
    alpha;

  size_t
    a0,
    a1,
    bits,
    code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /*
        Get 4x4 patch of pixels to write on
      */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);

      /*
        Read alpha values (8 bytes)
      */
      a0 = ReadBlobLSBLong(image);
      a1 = ReadBlobLSBLong(image);

      /*
        Read 8 bytes of data from the image
      */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);

      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);

      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          /* Clip texels of partial blocks at the image edges. */
          if ((x + i) < (ssize_t) image->columns &&
              (y + j) < (ssize_t) image->rows)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
              /*
                Extract alpha value: multiply 0..15 by 17 to get range 0..255
              */
              if (j < 2)
                alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf);
              else
                alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

/* Decode the main DXT3 surface, then read or skip its mipmaps. */
static MagickBooleanType ReadDXT3(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT3Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);

  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadDXT3Pixels,exception));
  else
    return(SkipDXTMipmaps(image,dds_info,16,exception));
}

/*
  Decode a DXT5 (BC3) compressed surface: each 16-byte block carries two
  8-bit alpha endpoints plus 48 bits of 3-bit interpolated-alpha codes,
  followed by a DXT1-style colour block.
*/
static MagickBooleanType ReadDXT5Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  MagickSizeType
    alpha_bits;

  register Quantum
    *q;

  register ssize_t
    i,
    x;

  unsigned char
    a0,
    a1;

  size_t
    alpha,
    bits,
    code,
    alpha_code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /*
        Get 4x4 patch of pixels to write on
      */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4,
        image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);

      /*
        Read alpha values (8 bytes)
      */
      a0 = (unsigned char) ReadBlobByte(image);
      a1 = (unsigned char) ReadBlobByte(image);

      /* 48 bits of 3-bit alpha codes, read as a 32-bit + 16-bit chunk. */
      alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
      alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32);

      /*
        Read 8 bytes of data from the image
      */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);

      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);

      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          /* Clip texels of partial blocks at the image edges. */
          if ((x + i) < (ssize_t) image->columns &&
              (y + j) < (ssize_t) image->rows)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
              /*
                Extract alpha value
              */
              alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
              if (alpha_code == 0)
                alpha = a0;
              else if (alpha_code == 1)
                alpha = a1;
              else if (a0 > a1)
                /* 8-alpha mode: six interpolated values. */
                alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
              else if (alpha_code == 6)
                alpha = 0;
              else if (alpha_code == 7)
                alpha = 255;
              else
                /* 6-alpha mode: four interpolated values plus 0 and 255. */
                alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

/* Decode the main DXT5 surface, then read or skip its mipmaps. */
static MagickBooleanType ReadDXT5(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT5Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);

  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadDXT5Pixels,exception));
  else
    return(SkipDXTMipmaps(image,dds_info,16,exception));
}

/*
  Decode an uncompressed surface: 8-bit grayscale, 16-bit 5:6:5 packed,
  or 24/32-bit BGR(X) rows; the fourth byte of a 32-bit pixel is skipped
  here (surfaces with DDPF_ALPHAPIXELS are routed to the RGBA decoder).
*/
static MagickBooleanType ReadUncompressedRGBPixels(Image *image,
  DDSInfo *dds_info,ExceptionInfo *exception)
{
  register Quantum
    *q;

  ssize_t
    x,
    y;

  unsigned short
    color;

  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);

    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 8)
        SetPixelGray(image,ScaleCharToQuantum(ReadBlobByte(image)),q);
      else if (dds_info->pixelformat.rgb_bitcount == 16)
        {
          /* 5:6:5 packed colour, widened to 8 bits per channel. */
          color=ReadBlobShort(image);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            (((color >> 11)/31.0)*255)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 5) >> 10)/63.0)*255)),q);
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
        }
      else
        {
          /* BGR byte order; a 32-bit pixel carries one padding byte. */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          if (dds_info->pixelformat.rgb_bitcount == 32)
            (void) ReadBlobByte(image);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

/*
  Decode an uncompressed RGB surface; 16-bit surfaces must use the
  0xf800/0x07e0/0x001f (5:6:5) bit layout, and 8-bit surfaces are
  treated as grayscale.
*/
static MagickBooleanType ReadUncompressedRGB(const ImageInfo *image_info,
  Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (dds_info->pixelformat.rgb_bitcount == 8)
    (void) SetImageType(image,GrayscaleType,exception);
  else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask(
    dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000))
    ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
      image->filename);

  if (ReadUncompressedRGBPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);

  if (read_mipmaps != MagickFalse)
return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBPixels, exception)); else return(SkipRGBMipmaps(image,dds_info,3,exception)); } static MagickBooleanType ReadUncompressedRGBAPixels(Image *image, DDSInfo *dds_info,ExceptionInfo *exception) { register Quantum *q; ssize_t alphaBits, x, y; unsigned short color; alphaBits=0; if (dds_info->pixelformat.rgb_bitcount == 16) { if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000)) alphaBits=1; else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00)) { alphaBits=2; (void) SetImageType(image,GrayscaleAlphaType,exception); } else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000)) alphaBits=4; else ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported", image->filename); } for (y = 0; y < (ssize_t) image->rows; y++) { q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception); if (q == (Quantum *) NULL) return(MagickFalse); for (x = 0; x < (ssize_t) image->columns; x++) { if (dds_info->pixelformat.rgb_bitcount == 16) { color=ReadBlobShort(image); if (alphaBits == 1) { SetPixelAlpha(image,(color & (1 << 15)) ? 
QuantumRange : 0,q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 1) >> 11)/31.0)*255)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 6) >> 11)/31.0)*255)),q); SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q); } else if (alphaBits == 2) { SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) (color >> 8)),q); SetPixelGray(image,ScaleCharToQuantum((unsigned char)color),q); } else { SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) (((color >> 12)/15.0)*255)),q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 4) >> 12)/15.0)*255)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 8) >> 12)/15.0)*255)),q); SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 12) >> 12)/15.0)*255)),q); } } else { SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); } static MagickBooleanType ReadUncompressedRGBA(const ImageInfo *image_info, Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (ReadUncompressedRGBAPixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBAPixels, exception)); else return(SkipRGBMipmaps(image,dds_info,4,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R 
e g i s t e r D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterDDSImage() adds attributes for the DDS image format to % the list of supported formats. The attributes include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterDDSImage method is: % % RegisterDDSImage(void) % */ ModuleExport size_t RegisterDDSImage(void) { MagickInfo *entry; entry = AcquireMagickInfo("DDS","DDS","Microsoft DirectDraw Surface"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); entry = AcquireMagickInfo("DDS","DXT1","Microsoft DirectDraw Surface"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); entry = AcquireMagickInfo("DDS","DXT5","Microsoft DirectDraw Surface"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } static void RemapIndices(const ssize_t *map, const unsigned char *source, unsigned char *target) { register ssize_t i; for (i = 0; i < 16; i++) { if (map[i] == -1) target[i] = 3; else target[i] = source[map[i]]; } } /* Skip the mipmap images for compressed (DXTn) dds files */ static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info, int texel_size,ExceptionInfo *exception) { /* 
Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { MagickOffsetType offset; register ssize_t i; size_t h, w; w=DIV2(dds_info->width); h=DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { offset=(MagickOffsetType)((w+3)/4)*((h+3)/4)*texel_size; if (SeekBlob(image,offset,SEEK_CUR) < 0) break; w=DIV2(w); h=DIV2(h); if ((w == 1) && (h == 1)) break; } } return(MagickTrue); } /* Skip the mipmap images for uncompressed (RGB or RGBA) dds files */ static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info, int pixel_size,ExceptionInfo *exception) { /* Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { MagickOffsetType offset; register ssize_t i; size_t h, w; w=DIV2(dds_info->width); h=DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { offset=(MagickOffsetType)w*h*pixel_size; if (SeekBlob(image,offset,SEEK_CUR) < 0) break; w=DIV2(w); h=DIV2(h); if ((w == 1) && (h == 1)) break; } } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterDDSImage() removes format registrations made by the % DDS module from the list of supported 
formats. % % The format of the UnregisterDDSImage method is: % % UnregisterDDSImage(void) % */ ModuleExport void UnregisterDDSImage(void) { (void) UnregisterMagickInfo("DDS"); (void) UnregisterMagickInfo("DXT1"); (void) UnregisterMagickInfo("DXT5"); } static void WriteAlphas(Image *image, const ssize_t *alphas, size_t min5, size_t max5, size_t min7, size_t max7) { register ssize_t i; size_t err5, err7, j; unsigned char indices5[16], indices7[16]; FixRange(min5,max5,5); err5 = CompressAlpha(min5,max5,5,alphas,indices5); FixRange(min7,max7,7); err7 = CompressAlpha(min7,max7,7,alphas,indices7); if (err7 < err5) { for (i=0; i < 16; i++) { unsigned char index; index = indices7[i]; if( index == 0 ) indices5[i] = 1; else if (index == 1) indices5[i] = 0; else indices5[i] = 9 - index; } min5 = max7; max5 = min7; } (void) WriteBlobByte(image,(unsigned char) min5); (void) WriteBlobByte(image,(unsigned char) max5); for(i=0; i < 2; i++) { size_t value = 0; for (j=0; j < 8; j++) { size_t index = (size_t) indices5[j + i*8]; value |= ( index << 3*j ); } for (j=0; j < 3; j++) { size_t byte = (value >> 8*j) & 0xff; (void) WriteBlobByte(image,(unsigned char) byte); } } } static void WriteCompressed(Image *image, const size_t count, DDSVector4 *points, const ssize_t *map, const MagickBooleanType clusterFit) { float covariance[16]; DDSVector3 end, principle, start; DDSVector4 metric; unsigned char indices[16]; VectorInit(metric,1.0f); VectorInit3(start,0.0f); VectorInit3(end,0.0f); ComputeWeightedCovariance(count,points,covariance); ComputePrincipleComponent(covariance,&principle); if ((clusterFit == MagickFalse) || (count == 0)) CompressRangeFit(count,points,map,principle,metric,&start,&end,indices); else CompressClusterFit(count,points,map,principle,metric,&start,&end,indices); WriteIndices(image,start,end,indices); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e D D S I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format. % % The format of the WriteBMPImage method is: % % MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % */ static MagickBooleanType WriteDDSImage(const ImageInfo *image_info, Image *image, ExceptionInfo *exception) { const char *option; size_t compression, columns, maxMipmaps, mipmaps, pixelFormat, rows; MagickBooleanType clusterFit, fromlist, status, weightByAlpha; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); (void) TransformImageColorspace(image,sRGBColorspace,exception); pixelFormat=DDPF_FOURCC; compression=FOURCC_DXT5; if (image->alpha_trait == UndefinedPixelTrait) compression=FOURCC_DXT1; if (LocaleCompare(image_info->magick,"dxt1") == 0) compression=FOURCC_DXT1; option=GetImageOption(image_info,"dds:compression"); if (option != (char *) NULL) { if (LocaleCompare(option,"dxt1") == 0) compression=FOURCC_DXT1; if (LocaleCompare(option,"none") == 0) pixelFormat=DDPF_RGB; } clusterFit=MagickFalse; weightByAlpha=MagickFalse; if (pixelFormat == DDPF_FOURCC) { option=GetImageOption(image_info,"dds:cluster-fit"); if (IsStringTrue(option) != MagickFalse) { clusterFit=MagickTrue; if (compression != FOURCC_DXT1) { option=GetImageOption(image_info,"dds:weight-by-alpha"); if (IsStringTrue(option) != MagickFalse) weightByAlpha=MagickTrue; } } } mipmaps=0; fromlist=MagickFalse; option=GetImageOption(image_info,"dds:mipmaps"); if (option != (char *) 
NULL) { if (LocaleNCompare(option,"fromlist",8) == 0) { Image *next; fromlist=MagickTrue; next=image->next; while(next != (Image *) NULL) { mipmaps++; next=next->next; } } } if ((mipmaps == 0) && ((image->columns & (image->columns - 1)) == 0) && ((image->rows & (image->rows - 1)) == 0)) { maxMipmaps=SIZE_MAX; if (option != (char *) NULL) maxMipmaps=StringToUnsignedLong(option); if (maxMipmaps != 0) { columns=image->columns; rows=image->rows; while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps) { columns=DIV2(columns); rows=DIV2(rows); mipmaps++; } } } WriteDDSInfo(image,pixelFormat,compression,mipmaps); WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha, exception); if ((mipmaps > 0) && (WriteMipmaps(image,image_info,pixelFormat,compression, mipmaps,fromlist,clusterFit,weightByAlpha,exception) == MagickFalse)) return(MagickFalse); (void) CloseBlob(image); return(MagickTrue); } static void WriteDDSInfo(Image *image, const size_t pixelFormat, const size_t compression, const size_t mipmaps) { char software[MagickPathExtent]; register ssize_t i; unsigned int format, caps, flags; flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT); caps=(unsigned int) DDSCAPS_TEXTURE; format=(unsigned int) pixelFormat; if (format == DDPF_FOURCC) flags=flags | DDSD_LINEARSIZE; else flags=flags | DDSD_PITCH; if (mipmaps > 0) { flags=flags | (unsigned int) DDSD_MIPMAPCOUNT; caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX); } if (format != DDPF_FOURCC && image->alpha_trait != UndefinedPixelTrait) format=format | DDPF_ALPHAPIXELS; (void) WriteBlob(image,4,(unsigned char *) "DDS "); (void) WriteBlobLSBLong(image,124); (void) WriteBlobLSBLong(image,flags); (void) WriteBlobLSBLong(image,(unsigned int) image->rows); (void) WriteBlobLSBLong(image,(unsigned int) image->columns); if (pixelFormat == DDPF_FOURCC) { /* Compressed DDS requires linear compressed size of first image */ if (compression == FOURCC_DXT1) (void) 
WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8)); else /* DXT5 */ (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16)); } else { /* Uncompressed DDS requires byte pitch of first image */ if (image->alpha_trait != UndefinedPixelTrait) (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4)); else (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3)); } (void) WriteBlobLSBLong(image,0x00); (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1); (void) memset(software,0,sizeof(software)); (void) CopyMagickString(software,"IMAGEMAGICK",MagickPathExtent); (void) WriteBlob(image,44,(unsigned char *) software); (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,format); if (pixelFormat == DDPF_FOURCC) { (void) WriteBlobLSBLong(image,(unsigned int) compression); for(i=0;i < 5;i++) /* bitcount / masks */ (void) WriteBlobLSBLong(image,0x00); } else { (void) WriteBlobLSBLong(image,0x00); if (image->alpha_trait != UndefinedPixelTrait) { (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0xff000000); } else { (void) WriteBlobLSBLong(image,24); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0x00); } } (void) WriteBlobLSBLong(image,caps); for(i=0;i < 4;i++) /* ddscaps2 + reserved region */ (void) WriteBlobLSBLong(image,0x00); } static void WriteFourCC(Image *image, const size_t compression, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { register ssize_t x; ssize_t i, y, bx, by; register const Quantum *p; for (y=0; y < (ssize_t) image->rows; y+=4) { for (x=0; x < (ssize_t) image->columns; x+=4) { MagickBooleanType match; DDSVector4 point, 
points[16]; size_t count = 0, max5 = 0, max7 = 0, min5 = 255, min7 = 255, columns = 4, rows = 4; ssize_t alphas[16], map[16]; unsigned char alpha; if (x + columns >= image->columns) columns = image->columns - x; if (y + rows >= image->rows) rows = image->rows - y; p=GetVirtualPixels(image,x,y,columns,rows,exception); if (p == (const Quantum *) NULL) break; for (i=0; i<16; i++) { map[i] = -1; alphas[i] = -1; } for (by=0; by < (ssize_t) rows; by++) { for (bx=0; bx < (ssize_t) columns; bx++) { if (compression == FOURCC_DXT5) alpha = ScaleQuantumToChar(GetPixelAlpha(image,p)); else alpha = 255; if (compression == FOURCC_DXT5) { if (alpha < min7) min7 = alpha; if (alpha > max7) max7 = alpha; if (alpha != 0 && alpha < min5) min5 = alpha; if (alpha != 255 && alpha > max5) max5 = alpha; } alphas[4*by + bx] = (size_t)alpha; point.x = (float)ScaleQuantumToChar(GetPixelRed(image,p)) / 255.0f; point.y = (float)ScaleQuantumToChar(GetPixelGreen(image,p)) / 255.0f; point.z = (float)ScaleQuantumToChar(GetPixelBlue(image,p)) / 255.0f; point.w = weightByAlpha ? 
(float)(alpha + 1) / 256.0f : 1.0f; p+=GetPixelChannels(image); match = MagickFalse; for (i=0; i < (ssize_t) count; i++) { if ((points[i].x == point.x) && (points[i].y == point.y) && (points[i].z == point.z) && (alpha >= 128 || compression == FOURCC_DXT5)) { points[i].w += point.w; map[4*by + bx] = i; match = MagickTrue; break; } } if (match != MagickFalse) continue; points[count].x = point.x; points[count].y = point.y; points[count].z = point.z; points[count].w = point.w; map[4*by + bx] = count; count++; } } for (i=0; i < (ssize_t) count; i++) points[i].w = sqrt(points[i].w); if (compression == FOURCC_DXT5) WriteAlphas(image,alphas,min5,max5,min7,max7); if (count == 1) WriteSingleColorFit(image,points,map); else WriteCompressed(image,count,points,map,clusterFit); } } } static void WriteImageData(Image *image, const size_t pixelFormat, const size_t compression,const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { if (pixelFormat == DDPF_FOURCC) WriteFourCC(image,compression,clusterFit,weightByAlpha,exception); else WriteUncompressed(image,exception); } static inline size_t ClampToLimit(const float value, const size_t limit) { size_t result = (int) (value + 0.5f); if (result < 0.0f) return(0); if (result > limit) return(limit); return result; } static inline size_t ColorTo565(const DDSVector3 point) { size_t r = ClampToLimit(31.0f*point.x,31); size_t g = ClampToLimit(63.0f*point.y,63); size_t b = ClampToLimit(31.0f*point.z,31); return (r << 11) | (g << 5) | b; } static void WriteIndices(Image *image, const DDSVector3 start, const DDSVector3 end, unsigned char *indices) { register ssize_t i; size_t a, b; unsigned char remapped[16]; const unsigned char *ind; a = ColorTo565(start); b = ColorTo565(end); for (i=0; i<16; i++) { if( a < b ) remapped[i] = (indices[i] ^ 0x1) & 0x3; else if( a == b ) remapped[i] = 0; else remapped[i] = indices[i]; } if( a < b ) Swap(a,b); (void) WriteBlobByte(image,(unsigned char) (a & 0xff)); 
(void) WriteBlobByte(image,(unsigned char) (a >> 8)); (void) WriteBlobByte(image,(unsigned char) (b & 0xff)); (void) WriteBlobByte(image,(unsigned char) (b >> 8)); for (i=0; i<4; i++) { ind = remapped + 4*i; (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) | (ind[3] << 6)); } } static MagickBooleanType WriteMipmaps(Image *image,const ImageInfo *image_info, const size_t pixelFormat,const size_t compression,const size_t mipmaps, const MagickBooleanType fromlist,const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,ExceptionInfo *exception) { const char *option; Image *mipmap_image, *resize_image; MagickBooleanType fast_mipmaps, status; register ssize_t i; size_t columns, rows; columns=DIV2(image->columns); rows=DIV2(image->rows); option=GetImageOption(image_info,"dds:fast-mipmaps"); fast_mipmaps=IsStringTrue(option); mipmap_image=image; resize_image=image; status=MagickTrue; for (i=0; i < (ssize_t) mipmaps; i++) { if (fromlist == MagickFalse) { mipmap_image=ResizeImage(resize_image,columns,rows,TriangleFilter, exception); if (mipmap_image == (Image *) NULL) { status=MagickFalse; break; } } else { mipmap_image=mipmap_image->next; if ((mipmap_image->columns != columns) || (mipmap_image->rows != rows)) ThrowBinaryException(CoderError,"ImageColumnOrRowSizeIsNotSupported", image->filename); } DestroyBlob(mipmap_image); mipmap_image->blob=ReferenceBlob(image->blob); WriteImageData(mipmap_image,pixelFormat,compression,weightByAlpha, clusterFit,exception); if (fromlist == MagickFalse) { if (fast_mipmaps == MagickFalse) mipmap_image=DestroyImage(mipmap_image); else { if (resize_image != image) resize_image=DestroyImage(resize_image); resize_image=mipmap_image; } } columns=DIV2(columns); rows=DIV2(rows); } if (resize_image != image) resize_image=DestroyImage(resize_image); return(status); } static void WriteSingleColorFit(Image *image, const DDSVector4 *points, const ssize_t *map) { DDSVector3 start, end; register ssize_t i; unsigned char 
color[3], index, indexes[16], indices[16]; color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255); color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255); color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255); index=0; ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index); for (i=0; i< 16; i++) indexes[i]=index; RemapIndices(map,indexes,indices); WriteIndices(image,start,end,indices); } static void WriteUncompressed(Image *image, ExceptionInfo *exception) { register const Quantum *p; register ssize_t x; ssize_t y; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(image,p))); if (image->alpha_trait != UndefinedPixelTrait) (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(image,p))); p+=GetPixelChannels(image); } } }
residualbased_simple_steady_scheme.h
//    |  /           |
//    ' /  __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Michael Andre, https://github.com/msandre
//

#if !defined(KRATOS_RESIDUALBASED_SIMPLE_STEADY_SCHEME )
#define KRATOS_RESIDUALBASED_SIMPLE_STEADY_SCHEME

// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "solving_strategies/schemes/scheme.h"
#include "includes/variables.h"
#include "includes/cfd_variables.h"
#include "containers/array_1d.h"
#include "utilities/openmp_utils.h"
#include "utilities/coordinate_transformation_utilities.h"
#include "processes/process.h"

namespace Kratos {

///@name Kratos Classes
///@{

/// Steady-state scheme with separate velocity/pressure under-relaxation.
/** Builds local steady contributions from elements and conditions, rotates
 *  local systems into the normal/tangential frame on SLIP boundaries via
 *  CoordinateTransformationUtils, and (optionally) drives a turbulence
 *  model process each nonlinear iteration.  Reactions are recomputed from
 *  element residuals when the solution step is finalized.
 *
 *  NOTE(review): only the velocity relaxation factor is applied to the
 *  solution update (see Update()); mPressureRelaxationFactor is stored and
 *  exposed through its accessors but not used in this header — presumably
 *  consumed by the element implementations through the process info; confirm
 *  against the elements before relying on it.
 */
template<class TSparseSpace, class TDenseSpace >
class ResidualBasedSimpleSteadyScheme : public Scheme<TSparseSpace, TDenseSpace> {
public:
  ///@name Type Definitions
  ///@{

  KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedSimpleSteadyScheme);

  typedef Scheme<TSparseSpace, TDenseSpace> BaseType;

  typedef typename BaseType::DofsArrayType DofsArrayType;

  typedef typename BaseType::TSystemMatrixType TSystemMatrixType;

  typedef typename BaseType::TSystemVectorType TSystemVectorType;

  typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;

  typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;

  typedef Element::GeometryType GeometryType;

  ///@}
  ///@name Life Cycle
  ///@{

  /// Constructor without a turbulence model.
  /** @param VelocityRelaxationFactor factor applied to the solution update.
   *  @param PressureRelaxationFactor stored for use by accessors (see class note).
   *  @param DomainSize spatial dimension; the rotation tool works on
   *         DomainSize velocity dofs plus one pressure dof per node.
   */
  ResidualBasedSimpleSteadyScheme(double VelocityRelaxationFactor,
                                  double PressureRelaxationFactor,
                                  unsigned int DomainSize)
      : Scheme<TSparseSpace, TDenseSpace>(),
        mVelocityRelaxationFactor(VelocityRelaxationFactor),
        mPressureRelaxationFactor(PressureRelaxationFactor),
        mRotationTool(DomainSize,DomainSize+1,SLIP)
  {}

  /// Constructor with a turbulence model executed each nonlinear iteration.
  ResidualBasedSimpleSteadyScheme(
      double VelocityRelaxationFactor,
      double PressureRelaxationFactor,
      unsigned int DomainSize,
      Process::Pointer pTurbulenceModel)
      : Scheme<TSparseSpace, TDenseSpace>(),
        mVelocityRelaxationFactor(VelocityRelaxationFactor),
        mPressureRelaxationFactor(PressureRelaxationFactor),
        mRotationTool(DomainSize,DomainSize+1,SLIP),
        mpTurbulenceModel(pTurbulenceModel)
  {}

  ~ResidualBasedSimpleSteadyScheme() override {}

  ///@}
  ///@name Operators
  ///@{

  double GetVelocityRelaxationFactor() const {
    return mVelocityRelaxationFactor;
  }

  void SetVelocityRelaxationFactor(double factor) {
    mVelocityRelaxationFactor = factor;
  }

  double GetPressureRelaxationFactor() const {
    return mPressureRelaxationFactor;
  }

  void SetPressureRelaxationFactor(double factor) {
    mPressureRelaxationFactor = factor;
  }

  /// Apply the relaxed solution update to the dofs.
  /** Velocities are rotated into the normal/tangential frame on SLIP nodes,
   *  the update rDx is scaled in place by the velocity relaxation factor,
   *  dofs are updated, and velocities are rotated back.
   */
  void Update(ModelPart& rModelPart,
              DofsArrayType& rDofSet,
              TSystemMatrixType& rA,
              TSystemVectorType& rDx,
              TSystemVectorType& rb) override
  {
    KRATOS_TRY;

    mRotationTool.RotateVelocities(rModelPart);

    TSparseSpace::InplaceMult(rDx, mVelocityRelaxationFactor);
    mpDofUpdater->UpdateDofs(rDofSet,rDx);

    mRotationTool.RecoverVelocities(rModelPart);

    KRATOS_CATCH("");
  }

  /// Build the local LHS/RHS for an element, adding the steady velocity
  /// contribution and applying the slip rotation/condition.
  void CalculateSystemContributions(
      Element& rCurrentElement,
      LocalSystemMatrixType& LHS_Contribution,
      LocalSystemVectorType& RHS_Contribution,
      Element::EquationIdVectorType& EquationId,
      const ProcessInfo& CurrentProcessInfo) override
  {
    KRATOS_TRY;

    rCurrentElement.InitializeNonLinearIteration(CurrentProcessInfo);
    rCurrentElement.CalculateLocalSystem(LHS_Contribution, RHS_Contribution,
                                         CurrentProcessInfo);

    Matrix SteadyLHS;
    rCurrentElement.CalculateLocalVelocityContribution(SteadyLHS, RHS_Contribution,
                                                       CurrentProcessInfo);
    rCurrentElement.EquationIdVector(EquationId, CurrentProcessInfo);

    // An empty SteadyLHS means the element provided no extra contribution.
    if (SteadyLHS.size1() != 0)
      noalias(LHS_Contribution) += SteadyLHS;

    // apply slip condition
    mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentElement.GetGeometry());
    mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentElement.GetGeometry());

    KRATOS_CATCH("");
  }

  /// Condition counterpart of the element overload above.
  void CalculateSystemContributions(
      Condition& rCurrentCondition,
      LocalSystemMatrixType& LHS_Contribution,
      LocalSystemVectorType& RHS_Contribution,
      Condition::EquationIdVectorType& EquationId,
      const ProcessInfo& CurrentProcessInfo) override
  {
    KRATOS_TRY;

    rCurrentCondition.InitializeNonLinearIteration(CurrentProcessInfo);
    rCurrentCondition.CalculateLocalSystem(LHS_Contribution, RHS_Contribution,
                                           CurrentProcessInfo);

    Matrix SteadyLHS;
    rCurrentCondition.CalculateLocalVelocityContribution(SteadyLHS, RHS_Contribution,
                                                         CurrentProcessInfo);
    rCurrentCondition.EquationIdVector(EquationId, CurrentProcessInfo);

    if (SteadyLHS.size1() != 0)
      noalias(LHS_Contribution) += SteadyLHS;

    // apply slip condition
    mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentCondition.GetGeometry());
    mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentCondition.GetGeometry());

    KRATOS_CATCH("");
  }

  /// RHS-only build for an element (delegates to the full build and
  /// discards the LHS).
  void CalculateRHSContribution(
      Element& rCurrentElement,
      LocalSystemVectorType& rRHS_Contribution,
      Element::EquationIdVectorType& rEquationId,
      const ProcessInfo& rCurrentProcessInfo) override
  {
    KRATOS_TRY;

    //TODO: This is very inefficient. Check why the RHS-only methods are not called.
    Matrix LHS_Contribution;
    CalculateSystemContributions(
        rCurrentElement,
        LHS_Contribution,
        rRHS_Contribution,
        rEquationId,
        rCurrentProcessInfo);

    KRATOS_CATCH("");
  }

  /// RHS-only build for a condition (delegates to the full build and
  /// discards the LHS).
  void CalculateRHSContribution(
      Condition& rCurrentCondition,
      LocalSystemVectorType& rRHS_Contribution,
      Element::EquationIdVectorType& rEquationId,
      const ProcessInfo& rCurrentProcessInfo) override
  {
    KRATOS_TRY;

    //TODO: This is very inefficient. Check why the RHS-only methods are not called.
    Matrix LHS_Contribution;
    CalculateSystemContributions(
        rCurrentCondition,
        LHS_Contribution,
        rRHS_Contribution,
        rEquationId,
        rCurrentProcessInfo);

    KRATOS_CATCH("");
  }

  /// Per-iteration hook: run the turbulence model (if any) and, when
  /// OSS_SWITCH is active, assemble the nodal OSS projections
  /// (ADVPROJ/DIVPROJ) normalized by NODAL_AREA.
  void FinalizeNonLinIteration(ModelPart& rModelPart,
                               TSystemMatrixType& rA,
                               TSystemVectorType& rDx,
                               TSystemVectorType& rb) override
  {
    if (mpTurbulenceModel) // If not null
    {
      mpTurbulenceModel->Execute();
    }

    ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();

    //if orthogonal subscales are computed
    if (CurrentProcessInfo[OSS_SWITCH] == 1.0) {
      KRATOS_INFO_IF("ResidualBasedSimpleSteadyScheme", rModelPart.GetCommunicator().MyPID() == 0)
          << "Computing OSS projections" << std::endl;

      const int number_of_nodes = rModelPart.NumberOfNodes();

      // Zero the projection accumulators before element assembly.
#pragma omp parallel for
      for (int i = 0; i < number_of_nodes; i++) {
        ModelPart::NodeIterator it_node = rModelPart.NodesBegin() + i;
        noalias(it_node->FastGetSolutionStepValue(ADVPROJ)) = ZeroVector(3);
        it_node->FastGetSolutionStepValue(DIVPROJ) = 0.0;
        it_node->FastGetSolutionStepValue(NODAL_AREA) = 0.0;
      }

      const int number_of_elements = rModelPart.NumberOfElements();
      array_1d<double, 3 > output;

#pragma omp parallel for private(output)
      for (int i = 0; i < number_of_elements; i++) {
        ModelPart::ElementIterator it_elem = rModelPart.ElementsBegin() + i;
        it_elem->Calculate(ADVPROJ,output,CurrentProcessInfo);
      }

      // Sum contributions across MPI partitions before normalizing.
      rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA);
      rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ);
      rModelPart.GetCommunicator().AssembleCurrentData(ADVPROJ);

#pragma omp parallel for
      for (int i = 0; i < number_of_nodes; i++) {
        ModelPart::NodeIterator it_node = rModelPart.NodesBegin() + i;
        // Guard against division by zero on nodes with no assembled area.
        if (it_node->FastGetSolutionStepValue(NODAL_AREA) == 0.0)
          it_node->FastGetSolutionStepValue(NODAL_AREA) = 1.0;
        const double area_inverse = 1.0 / it_node->FastGetSolutionStepValue(NODAL_AREA);
        it_node->FastGetSolutionStepValue(ADVPROJ) *= area_inverse;
        it_node->FastGetSolutionStepValue(DIVPROJ) *= area_inverse;
      }
    }
  }

  /// End-of-step hook: recompute nodal REACTION from the elements' steady
  /// residuals, assemble across partitions, then call the base class.
  void FinalizeSolutionStep(ModelPart& rModelPart,
                            TSystemMatrixType& rA,
                            TSystemVectorType& rDx,
                            TSystemVectorType& rb) override
  {
    LocalSystemVectorType RHS_Contribution;
    LocalSystemMatrixType LHS_Contribution;
    const ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();

    // Reset reactions before accumulating element residuals.
    for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin();
         itNode != rModelPart.NodesEnd(); ++itNode)
    {
      itNode->FastGetSolutionStepValue(REACTION_X,0) = 0.0;
      itNode->FastGetSolutionStepValue(REACTION_Y,0) = 0.0;
      itNode->FastGetSolutionStepValue(REACTION_Z,0) = 0.0;
    }

    for (ModelPart::ElementsContainerType::ptr_iterator itElem = rModelPart.Elements().ptr_begin();
         itElem != rModelPart.Elements().ptr_end(); ++itElem)
    {
      (*itElem)->InitializeNonLinearIteration(rCurrentProcessInfo);
      (*itElem)->CalculateLocalSystem(LHS_Contribution,RHS_Contribution,rCurrentProcessInfo);
      Matrix SteadyLHS;
      (*itElem)->CalculateLocalVelocityContribution(SteadyLHS,RHS_Contribution,rCurrentProcessInfo);

      GeometryType& rGeom = (*itElem)->GetGeometry();
      unsigned int NumNodes = rGeom.PointsNumber();
      unsigned int Dimension = rGeom.WorkingSpaceDimension();
      unsigned int index = 0;

      // The local residual is ordered (vx, vy[, vz], p) per node; reactions
      // take the negated velocity components.
      for (unsigned int i = 0; i < NumNodes; i++)
      {
        rGeom[i].FastGetSolutionStepValue(REACTION_X,0) -= RHS_Contribution[index++];
        rGeom[i].FastGetSolutionStepValue(REACTION_Y,0) -= RHS_Contribution[index++];
        if (Dimension == 3)
          rGeom[i].FastGetSolutionStepValue(REACTION_Z,0) -= RHS_Contribution[index++];
        index++; // skip pressure dof
      }
    }

    rModelPart.GetCommunicator().AssembleCurrentData(REACTION);
    Scheme<TSparseSpace, TDenseSpace>::FinalizeSolutionStep(rModelPart, rA, rDx, rb);
  }

  ///@}

protected:
  ///@name Protected Operators
  ///@{

  ///@}

private:
  ///@name Member Variables
  ///@{

  // Factor applied to the dof update in Update().
  double mVelocityRelaxationFactor;
  // Stored and exposed via accessors; not applied in this header (see class note).
  double mPressureRelaxationFactor;
  // Rotates local systems into normal/tangential frame on SLIP boundaries.
  CoordinateTransformationUtils<LocalSystemMatrixType,LocalSystemVectorType,double> mRotationTool;
  // Optional turbulence model, executed each nonlinear iteration when set.
  Process::Pointer mpTurbulenceModel;
  // Sparse-space-specific dof update helper.
  typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater();

  ///@}
};

///@}

} // namespace Kratos

#endif /* KRATOS_RESIDUALBASED_SIMPLE_STEADY_SCHEME defined */
GB_binop__bclr_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bclr_int16 // A.*B function (eWiseMult): GB_AemultB__bclr_int16 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__bclr_int16 // C+=b function (dense accum): GB_Cdense_accumb__bclr_int16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bclr_int16 // C=scalar+B GB_bind1st__bclr_int16 // C=scalar+B' GB_bind1st_tran__bclr_int16 // C=A+scalar GB_bind2nd__bclr_int16 // C=A'+scalar GB_bind2nd_tran__bclr_int16 // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = GB_BITCLR (aij, bij, int16_t, 16) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define 
GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = GB_BITCLR (x, y, int16_t, 16) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BCLR || GxB_NO_INT16 || GxB_NO_BCLR_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bclr_int16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bclr_int16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bclr_int16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool 
A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__bclr_int16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bclr_int16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT 
TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bclr_int16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t bij = Bx [p] ; Cx [p] = GB_BITCLR (x, bij, int16_t, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bclr_int16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t aij = Ax [p] ; Cx [p] = GB_BITCLR (aij, y, int16_t, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define 
GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = GB_BITCLR (x, aij, int16_t, 16) ; \ } GrB_Info GB_bind1st_tran__bclr_int16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = GB_BITCLR (aij, y, int16_t, 16) ; \ } GrB_Info GB_bind2nd_tran__bclr_int16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
1.race6.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> #define N 20 int main() { int A[N][N]; #pragma omp parallel for schedule(runtime) for (int i = 1; i < N; i++) for (int j = 1; j < N; j++) A[i][j] = A[i - 1][j - 1]; } // CHECK: Data Race detected // END
postgres_fmt_plug.c
/* PostgreSQL MD5 challenge-response cracker patch for JtR. Hacked together
 * during October of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
 *
 * Use Ettercap to get PostgreSQL MD5 challenge-response pairs in JtR format.
 * E.g. ettercap -Tq -r /home/user/sample.pcap
 *
 * Input format:
 * $postgres$user*salt*hash
 *
 * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>
 * and Copyright magnum 2013,
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted. */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_postgres;
#elif FMT_REGISTERS_H
john_register_one(&fmt_postgres);
#else

#include <string.h>
#include <errno.h>
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#define OMP_SCALE 64
#endif
#include "md5.h"
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"

#define FORMAT_LABEL        "postgres"
#define FORMAT_NAME         "PostgreSQL C/R"
#define ALGORITHM_NAME      "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT   ""
#define BENCHMARK_LENGTH    -1
#define PLAINTEXT_LENGTH    32
#define BINARY_SIZE         16
#define BINARY_ALIGN        MEM_ALIGN_WORD
#define SALT_SIZE           sizeof(struct custom_salt)
#define SALT_ALIGN          MEM_ALIGN_NONE
#define MAX_USERNAME_LEN    64
#define MIN_KEYS_PER_CRYPT  1
#define MAX_KEYS_PER_CRYPT  1
/* characters accepted as hex digits by valid() */
#define HEX "0123456789abcdefABCDEF"

static struct fmt_tests postgres_tests[] = {
	{"$postgres$postgres*f063f05d*1d586cc8d137e5f1733f234d224393e8", "openwall"},
	{"$postgres$postgres*c31803a2*1c4e11fb51835c3bbe9851ec91ec1375", "password"},
	/* $postgre$ is supported but deprecated */
	{"$postgre$postgres*684697c8*bf2a64f35feba7bf1b633d60393c1356", "openwall"},
	{NULL}
};

/* per-candidate plaintext and computed digest buffers, sized in init() */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];

static struct custom_salt {
	unsigned char user[MAX_USERNAME_LEN + 1];
	unsigned char salt[4];	/* 4 raw bytes decoded from 8 hex chars */
} *cur_salt;

/* Scale keys-per-crypt by the OpenMP thread count, then allocate the
 * key and digest arrays to match. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}

/* Validate "$postgres$user*salt*hash": 32-hex hash after the LAST '*',
 * exactly 8 hex salt chars before it, and a bounded username length. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	const char *p;

	if (strncmp(ciphertext, "$postgres$", 10))
		return 0;
	/* Check hash */
	if (!(p = strrchr(ciphertext, '*')))
		return 0;
	if (strspn(&p[1], HEX) != 2*BINARY_SIZE)
		return 0;
	/* Check salt: step back over "*salt" (1 + 8 chars) to the first '*' */
	p -= 9;
	if (*p != '*')
		return 0;
	if (strspn(&p[1], HEX) != 8)
		return 0;
	/* Check username length */
	if (p - ciphertext - 10 > MAX_USERNAME_LEN)
		return 0;
	return 1;
}

/* Rewrite the deprecated "$postgre$" tag to "$postgres$" when present;
 * otherwise pass the hash through unchanged. */
static char *prepare(char *split_fields[10], struct fmt_main *self)
{
	static char out[10 + sizeof(struct custom_salt) + 2*BINARY_SIZE +2+1];

	/* Replace deprecated tag */
	if (*split_fields[1] && !strncmp(split_fields[1], "$postgre$", 9)) {
		snprintf(out, sizeof(out), "%s%s", "$postgres$", &split_fields[1][9]);
		if (valid(out, self))
			return out;
	}
	return split_fields[1];
}

/* Parse username and 4 salt bytes out of an already-validated ciphertext.
 * NOTE(review): returns a pointer to a static buffer and uses strtok
 * (static internal state) — relies on the JtR loader calling this
 * single-threaded and copying the result; confirm before reusing. */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i;
	static struct custom_salt cs;

	ctcopy += 10; /* skip over "$postgres$" */
	p = strtok(ctcopy, "*");
	strnzcpy((char*)cs.user, p, MAX_USERNAME_LEN + 1);
	p = strtok(NULL, "*");
	for (i = 0; i < 4; i++)
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}

/* Decode the 32-hex hash (after the last '*') into 16 raw bytes. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	p = strrchr(ciphertext, '*') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

/* Standard JtR hash-bucket accessors over the first digest word. */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* Write len bytes of str as 2*len hex characters (itoa16 table) into out. */
static inline void hex_encode(unsigned char *str, int len, unsigned char *out)
{
	int i;

	for (i = 0; i < len; ++i) {
		out[0] = itoa16[str[i]>>4];
		out[1] = itoa16[str[i]&0xF];
		out += 2;
	}
}

/* Compute the challenge response for each candidate:
 *   crypt_out = MD5( hex( MD5( password . username ) ) . salt )
 * i.e. inner MD5 over password+user, hex-encoded, then outer MD5 with
 * the 4 raw salt bytes appended. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (index = 0; index < count; index++)
#endif
	{
		MD5_CTX ctx;
		unsigned char out[32];

		MD5_Init(&ctx);
		MD5_Update(&ctx, saved_key[index], strlen(saved_key[index]));
		MD5_Update(&ctx, cur_salt->user, strlen((char*)cur_salt->user));
		MD5_Final((unsigned char*)crypt_out[index], &ctx);
		hex_encode((unsigned char*)crypt_out[index], 16, out);
		MD5_Init(&ctx);
		MD5_Update(&ctx, out, 32);
		MD5_Update(&ctx, cur_salt->salt, 4);
		MD5_Final((unsigned char*)crypt_out[index], &ctx);
	}
	return count;
}

static int cmp_all(void *binary, int count)
{
	int index = 0;

#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], BINARY_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* Full 16-byte digest already compared in cmp_one, nothing more to check. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Store a candidate, silently truncating to PLAINTEXT_LENGTH. */
static void postgres_set_key(char *key, int index)
{
	int saved_key_length = strlen(key);

	if (saved_key_length > PLAINTEXT_LENGTH)
		saved_key_length = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_key_length);
	saved_key[index][saved_key_length] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

struct fmt_main fmt_postgres = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		postgres_tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		set_salt,
		postgres_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
GB_unaryop__identity_int64_int64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): any change here belongs in the generator template instead.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int64_int64
// op(A') function:  GB_tran__identity_int64_int64

// C type:   int64_t
// A type:   int64_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: identity, so the apply reduces to a typed copy
#define GB_OP(z, x) \
    z = x ;

// casting (int64_t -> int64_t: a no-op for this instantiation)
#define GB_CASTING(z, aij) \
    int64_t z = (int64_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_int64_int64
(
    int64_t *Cx,        // Cx and Ax may be aliased
    int64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__identity_int64_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // numeric phase of the transpose; the template expands GB_CAST_OP
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Utility.h
/* * SVRTK : SVR reconstruction based on MIRTK * * Copyright 2021- King's College London * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once // SVRTK #include "svrtk/Common.h" using namespace std; using namespace mirtk; // Forward declarations namespace svrtk { class Reconstruction; } namespace svrtk::Utility { /** * @brief Extract specific image ROI. * @param stack * @param transformation * @param min_x * @param min_y * @param min_z * @param max_x * @param max_y * @param max_z */ void bbox(const RealImage& stack, const RigidTransformation& transformation, double& min_x, double& min_y, double& min_z, double& max_x, double& max_y, double& max_z); /** * @brief Crop to non zero ROI. * @param image */ void bboxCrop(RealImage& image); /** * @brief Find centroid. * @param image * @param x * @param y * @param z */ void centroid(const RealImage& image, double& x, double& y, double& z); /** * @brief Center stacks. * @param stacks * @param stack_transformations * @param templateNumber */ void CenterStacks(const Array<RealImage>& stacks, Array<RigidTransformation>& stack_transformations, int templateNumber); /// Crops the image according to the mask void CropImage(RealImage& image, const RealImage& mask); /// GF 190416, useful for handling different slice orders void CropImageIgnoreZ(RealImage& image, const RealImage& mask); /** * @brief Compute NCC between images. 
* @param slice_1 * @param slice_2 * @param threshold * @param count * @return */ double ComputeNCC(const RealImage& slice_1, const RealImage& slice_2, const double threshold = 0.01, double *count = nullptr); double LocalSSIM(const RealImage slice, const RealImage sim_slice ); /** * @brief Compute inter-slice volume NCC (motion metric). * @param input_stack * @param template_stack * @param mask * @return */ double VolumeNCC(RealImage& input_stack, RealImage template_stack, const RealImage& mask); /** * @brief Compute internal stack statistics (volume and inter-slice NCC). * @param input_stack * @param mask * @param mask_volume * @param slice_ncc */ void StackStats(RealImage input_stack, RealImage& mask, double& mask_volume, double& slice_ncc); /** * @brief Run serial global similarity statistics (for the stack selection function). * @param template_stack * @param template_mask * @param stacks * @param masks * @param average_ncc * @param average_volume * @param current_stack_transformations */ void GlobalStackStats(RealImage template_stack, const RealImage& template_mask, const Array<RealImage>& stacks, const Array<RealImage>& masks, double& average_ncc, double& average_volume, Array<RigidTransformation>& current_stack_transformations); // Cropping of stacks based on intersection void StackIntersection(Array<RealImage>& stacks, RealImage template_mask); /** * @brief Run parallel global similarity statistics. * @param stacks * @param masks * @param all_global_ncc_array * @param all_global_volume_array */ void RunParallelGlobalStackStats(const Array<RealImage>& stacks, const Array<RealImage>& masks, Array<double>& all_global_ncc_array, Array<double>& all_global_volume_array); /** * @brief Create average image from the stacks and volumetric transformations. 
* @param reconstructor * @param stacks * @param stack_transformations * @return */ RealImage CreateAverage(const Reconstruction *reconstructor, const Array<RealImage>& stacks, Array<RigidTransformation>& stack_transformations); /** * @brief Create a mask from dark/black background. * @param reconstructor * @param stacks * @param stack_transformations * @param smooth_mask */ void CreateMaskFromBlackBackground(const Reconstruction *reconstructor, const Array<RealImage>& stacks, Array<RigidTransformation> stack_transformations, double smooth_mask); /** * @brief Transform and resample mask to the space of the image. * @param image * @param mask * @param transformation */ void TransformMask(const RealImage& image, RealImage& mask, const RigidTransformation& transformation); /** * @brief Run stack background filtering (GS based). * @param stacks * @param fg_sigma * @param bg_sigma */ void BackgroundFiltering(Array<RealImage>& stacks, const double fg_sigma, const double bg_sigma); /** * @brief Mask stacks with respect to the reconstruction mask and given transformations. * @param stacks * @param stack_transformations * @param mask */ void MaskStacks(Array<RealImage>& stacks, Array<RigidTransformation>& stack_transformations, const RealImage& mask); /** * @brief Mask slices based on the reconstruction mask. * @param slices * @param mask * @param Transform */ void MaskSlices(Array<RealImage>& slices, const RealImage& mask, function<void(size_t, double&, double&, double&)> Transform); /** * @brief Get slice order parameters. * @param stacks * @param pack_num * @param order * @param step * @param rewinder * @param output_z_slice_order * @param output_t_slice_order */ void GetSliceAcquisitionOrder(const Array<RealImage>& stacks, const Array<int>& pack_num, const Array<int>& order, const int step, const int rewinder, Array<int>& output_z_slice_order, Array<int>& output_t_slice_order); /** * @brief Split images into varying N packages. 
* @param stacks * @param sliceStacks * @param pack_num * @param sliceNums * @param order * @param step * @param rewinder * @param output_z_slice_order * @param output_t_slice_order */ void FlexibleSplitImage(const Array<RealImage>& stacks, Array<RealImage>& sliceStacks, const Array<int>& pack_num, const Array<int>& sliceNums, const Array<int>& order, const int step, const int rewinder, Array<int>& output_z_slice_order, Array<int>& output_t_slice_order); /** * @brief Split images based on multi-band acquisition. * @param stacks * @param sliceStacks * @param pack_num * @param sliceNums * @param multiband_vector * @param order * @param step * @param rewinder * @param output_z_slice_order * @param output_t_slice_order */ void FlexibleSplitImagewithMB(const Array<RealImage>& stacks, Array<RealImage>& sliceStacks, const Array<int>& pack_num, const Array<int>& sliceNums, const Array<int>& multiband_vector, const Array<int>& order, const int step, const int rewinder, Array<int>& output_z_slice_order, Array<int>& output_t_slice_order); /** * @brief Split stacks into packages based on specific order. * @param stacks * @param pack_num * @param packageStacks * @param order * @param step * @param rewinder * @param output_z_slice_order * @param output_t_slice_order */ void SplitPackages(const Array<RealImage>& stacks, const Array<int>& pack_num, Array<RealImage>& packageStacks, const Array<int>& order, const int step, const int rewinder, Array<int>& output_z_slice_order, Array<int>& output_t_slice_order); /** * @brief Split stacks into packages based on specific order (multi-band based). 
* @param stacks * @param pack_num * @param packageStacks * @param multiband_vector * @param order * @param step * @param rewinder * @param output_z_slice_order * @param output_t_slice_order */ void SplitPackageswithMB(const Array<RealImage>& stacks, const Array<int>& pack_num, Array<RealImage>& packageStacks, const Array<int>& multiband_vector, const Array<int>& order, const int step, const int rewinder, Array<int>& output_z_slice_order, Array<int>& output_t_slice_order); /** * @brief Split image into N packages. * @param image * @param packages * @param stacks */ void SplitImage(const RealImage& image, const int packages, Array<RealImage>& stacks); /** * @brief Split image into 2 packages. * @param image * @param packages * @param stacks */ void SplitImageEvenOdd(const RealImage& image, const int packages, Array<RealImage>& stacks); /** * @brief Split image into 4 packages. * @param image * @param packages * @param stacks * @param iter */ void SplitImageEvenOddHalf(const RealImage& image, const int packages, Array<RealImage>& stacks, const int iter); /** * @brief Split image into 2 packages. 
     * @param image
     * @param stacks
     */
    void HalfImage(const RealImage& image, Array<RealImage>& stacks);

    ////////////////////////////////////////////////////////////////////////////////
    // Inline/template definitions
    ////////////////////////////////////////////////////////////////////////////////

    /// Clear and preallocate memory for a vector
    template<typename VectorType>
    inline void ClearAndReserve(vector<VectorType>& vectorVar, size_t reserveSize) {
        vectorVar.clear();
        vectorVar.reserve(reserveSize);
    }

    //-------------------------------------------------------------------

    /// Clear and resize memory for vectors, filling with defaultValue
    template<typename VectorType>
    inline void ClearAndResize(vector<VectorType>& vectorVar, size_t reserveSize, const VectorType& defaultValue = VectorType()) {
        vectorVar.clear();
        vectorVar.resize(reserveSize, defaultValue);
    }

    //-------------------------------------------------------------------

    /**
     * @brief Binarise mask.
     * If template image has been masked instead of creating the mask in separate
     * file, this function can be used to create mask from the template image.
     * Voxels strictly above threshold become 1, all others 0.
     * @param image     input image (taken by value; the copy is binarised)
     * @param threshold binarisation cut-off (default 0.5)
     * @return binarised copy of the input
     */
    inline RealImage CreateMask(RealImage image, double threshold = 0.5) {
        RealPixel *ptr = image.Data();
        // NOTE(review): int counter vs NumberOfVoxels() — assumed to fit in
        // int and match its return type; confirm (MSVC OpenMP needs signed).
        #pragma omp parallel for
        for (int i = 0; i < image.NumberOfVoxels(); i++)
            ptr[i] = ptr[i] > threshold ? 1 : 0;
        return image;
    }

    //-------------------------------------------------------------------

    /// Normalise by the image maximum (when positive) and threshold mask
    inline RealImage ThresholdNormalisedMask(RealImage image, double threshold) {
        RealPixel smin, smax;
        image.GetMinMax(&smin, &smax);
        if (smax > 0)
            image /= smax;
        return CreateMask(image, threshold);
    }

    //-------------------------------------------------------------------

    /// Reset image origin and save it into the output transformation (RealImage/GreyImage/ByteImage).
    /// The transformation receives the old origin as pure translation (rotations zeroed).
    template<typename ImageType>
    inline void ResetOrigin(GenericImage<ImageType>& image, RigidTransformation& transformation) {
        double ox, oy, oz;
        image.GetOrigin(ox, oy, oz);
        image.PutOrigin(0, 0, 0);
        transformation.PutTranslationX(ox);
        transformation.PutTranslationY(oy);
        transformation.PutTranslationZ(oz);
        transformation.PutRotationX(0);
        transformation.PutRotationY(0);
        transformation.PutRotationZ(0);
    }

    //-------------------------------------------------------------------

    /// Perform nonlocal means filtering; each denoised stack is also
    /// written to "denoised-<i>.nii.gz" (distinct file per iteration).
    inline void NLMFiltering(Array<RealImage>& stacks) {
        #pragma omp parallel for
        for (int i = 0; i < stacks.size(); i++) {
            stacks[i] = NLDenoising::Run(stacks[i], 3, 1);
            stacks[i].Write((boost::format("denoised-%1%.nii.gz") % i).str().c_str());
        }
    }

    //-------------------------------------------------------------------

    /// Invert stack transformations in place.
    /// NOTE(review): unsigned (size_t) OpenMP loop index requires
    /// OpenMP 3.0+ — confirm the project's minimum toolchain.
    inline void InvertStackTransformations(Array<RigidTransformation>& stack_transformations) {
        //for each stack
        #pragma omp parallel for
        for (size_t i = 0; i < stack_transformations.size(); i++) {
            //invert transformation for the stacks
            stack_transformations[i].Invert();
            stack_transformations[i].UpdateParameter();
        }
    }

    //-------------------------------------------------------------------

    /// Mask input volume: voxels where mask == 0 are set to padding.
    /// Throws if image and mask voxel counts differ.
    inline void MaskImage(RealImage& image, const RealImage& mask, double padding = -1) {
        if (image.NumberOfVoxels() != mask.NumberOfVoxels())
            throw runtime_error("Cannot mask the image - different dimensions");

        RealPixel *pr = image.Data();
        const RealPixel *pm = mask.Data();

        #pragma omp parallel for
        for (int i = 0; i < image.NumberOfVoxels(); i++)
            if (pm[i] == 0)
                pr[i] = padding;
    }

    //-------------------------------------------------------------------

    /// Rescale image ignoring negative values: positive voxels are mapped
    /// so the current maximum becomes `max`; zero/negative voxels untouched.
    inline void Rescale(RealImage& img, double max) {
        // Get lower and upper bound
        RealPixel min_val, max_val;
        img.GetMinMax(&min_val, &max_val);

        RealPixel *ptr = img.Data();
        #pragma omp parallel for
        for (int i = 0; i < img.NumberOfVoxels(); i++)
            if (ptr[i] > 0)
                ptr[i] = double(ptr[i]) / double(max_val) * max;
    }

    //-------------------------------------------------------------------

    /// Apply static 3D mask to every time frame of a 4D volume:
    /// voxels with mask == 0 get `padding` in all t.
    inline void StaticMaskVolume4D(RealImage& volume, const RealImage& mask, const double padding) {
        #pragma omp parallel for
        for (int i = 0; i < volume.GetX(); i++)
            for (int j = 0; j < volume.GetY(); j++)
                for (int k = 0; k < volume.GetZ(); k++)
                    if (mask(i, j, k) == 0)
                        for (int t = 0; t < volume.GetT(); t++)
                            volume(i, j, k, t) = padding;
    }

}
beta_projectors_base.h
// Copyright (c) 2013-2017 Anton Kozhevnikov, Thomas Schulthess // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that // the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the // following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions // and the following disclaimer in the documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /** \file beta_projectors_base.h * * \brief Contains declaration and implementation of sirius::Beta_projectors_base class. */ #ifndef __BETA_PROJECTORS_BASE_H__ #define __BETA_PROJECTORS_BASE_H__ namespace sirius { #ifdef __GPU extern "C" void create_beta_gk_gpu(int num_atoms, int num_gkvec, int const* beta_desc, double_complex const* beta_gk_t, double const* gkvec, double const* atom_pos, double_complex* beta_gk); #endif enum beta_desc_idx { nbf = 0, offset = 1, offset_t = 2, ia = 3 }; struct beta_chunk_t { /// Number of beta-projectors in the current chunk. int num_beta_; /// Number of atoms in the current chunk. 
int num_atoms_; /// Offset in the global index of beta projectors. int offset_; /// Descriptor of block of beta-projectors for an atom. mdarray<int, 2> desc_; /// Positions of atoms. mdarray<double, 2> atom_pos_; }; /// Base class for beta-projectors, gradient of beta-projectors and strain derivatives of beta-projectors. template <int N> class Beta_projectors_base { protected: Simulation_context& ctx_; /// List of G+k vectors. Gvec const& gkvec_; /// Mapping between local and global G+k vector index. std::vector<int> const& igk_; /// Coordinates of G+k vectors used by GPU kernel. mdarray<double, 2> gkvec_coord_; /// Phase-factor independent coefficients of |beta> functions for atom types. std::array<matrix<double_complex>, N> pw_coeffs_t_; bool reallocate_pw_coeffs_t_on_gpu_{true}; matrix<double_complex> pw_coeffs_a_; std::vector<beta_chunk_t> beta_chunks_; int max_num_beta_; /// Total number of beta-projectors among atom types. int num_beta_t_; /// Split beta-projectors into chunks. void split_in_chunks() { auto& uc = ctx_.unit_cell(); /* initial chunk size */ int chunk_size = std::min(uc.num_atoms(), 256); /* maximum number of chunks */ int num_chunks = uc.num_atoms() / chunk_size + std::min(1, uc.num_atoms() % chunk_size); /* final maximum chunk size */ chunk_size = uc.num_atoms() / num_chunks + std::min(1, uc.num_atoms() % num_chunks); int offset_in_beta_gk{0}; beta_chunks_ = std::vector<beta_chunk_t>(num_chunks); for (int ib = 0; ib < num_chunks; ib++) { /* number of atoms in this chunk */ int na = std::min(uc.num_atoms(), (ib + 1) * chunk_size) - ib * chunk_size; beta_chunks_[ib].num_atoms_ = na; beta_chunks_[ib].desc_ = mdarray<int, 2>(4, na); beta_chunks_[ib].atom_pos_ = mdarray<double, 2>(3, na); int num_beta{0}; for (int i = 0; i < na; i++) { /* global index of atom by local index and chunk */ int ia = ib * chunk_size + i; auto pos = uc.atom(ia).position(); auto& type = uc.atom(ia).type(); /* atom fractional coordinates */ for (int x: {0, 1, 2}) { 
beta_chunks_[ib].atom_pos_(x, i) = pos[x]; } /* number of beta functions for atom */ beta_chunks_[ib].desc_(beta_desc_idx::nbf, i) = type.mt_basis_size(); /* offset in beta_gk*/ beta_chunks_[ib].desc_(beta_desc_idx::offset, i) = num_beta; /* offset in beta_gk_t */ beta_chunks_[ib].desc_(beta_desc_idx::offset_t, i) = type.offset_lo(); /* global index of atom */ beta_chunks_[ib].desc_(beta_desc_idx::ia, i) = ia; num_beta += type.mt_basis_size(); } /* number of beta-projectors in this chunk */ beta_chunks_[ib].num_beta_ = num_beta; beta_chunks_[ib].offset_ = offset_in_beta_gk; offset_in_beta_gk += num_beta; if (ctx_.processing_unit() == GPU) { beta_chunks_[ib].desc_.allocate(memory_t::device); beta_chunks_[ib].desc_.copy<memory_t::host, memory_t::device>(); beta_chunks_[ib].atom_pos_.allocate(memory_t::device); beta_chunks_[ib].atom_pos_.copy<memory_t::host, memory_t::device>(); } } max_num_beta_ = 0; for (auto& e: beta_chunks_) { max_num_beta_ = std::max(max_num_beta_, e.num_beta_); } num_beta_t_ = 0; for (int iat = 0; iat < uc.num_atom_types(); iat++) { num_beta_t_ += uc.atom_type(iat).mt_lo_basis_size(); } } /// A buffer for <beta|phi> product, shared between instances of Beta_projectors_base class. /** Stored as double to handle both gamma- and general k-point cases */ static mdarray<double, 1>& beta_phi_shared(size_t size__, memory_t mem_type__) { static mdarray<double, 1> a; /* reallocate buffer */ if (a.size() < size__) { a = mdarray<double, 1>(size__, mem_type__, "beta_phi_shared"); } return a; } /// A buffer for beta projectors for a chunk of atoms. 
static mdarray<double_complex, 1>& pw_coeffs_a_shared(size_t size__, memory_t mem_type__) { static mdarray<double_complex, 1> a; /* reallocate buffer */ if (a.size() < size__) { a = mdarray<double_complex, 1>(size__, mem_type__, "pw_coeffs_a_shared"); } return a; } public: Beta_projectors_base(Simulation_context& ctx__, Gvec const& gkvec__, std::vector<int> const& igk__) : ctx_(ctx__) , gkvec_(gkvec__) , igk_(igk__) { split_in_chunks(); if (!num_beta_t()) { return; } /* allocate memory */ for (int i = 0; i < N; i++) { pw_coeffs_t_[i] = matrix<double_complex>(num_gkvec_loc(), num_beta_t(), memory_t::host, "pw_coeffs_t_"); } if (ctx_.processing_unit() == GPU) { gkvec_coord_ = mdarray<double, 2>(3, num_gkvec_loc(), ctx__.dual_memory_t()); /* copy G+k vectors */ for (int igk_loc = 0; igk_loc < num_gkvec_loc(); igk_loc++) { auto vgk = gkvec_.gkvec(igk_[igk_loc]); for (auto x: {0, 1, 2}) { gkvec_coord_(x, igk_loc) = vgk[x]; } } gkvec_coord_.copy<memory_t::host, memory_t::device>(); } } ~Beta_projectors_base() { beta_phi_shared(0, memory_t::none) = mdarray<double, 1>(); } inline int num_gkvec_loc() const { return static_cast<int>(igk_.size()); } inline Unit_cell const& unit_cell() const { return ctx_.unit_cell(); } matrix<double_complex>& pw_coeffs_t(int i__) { return pw_coeffs_t_[i__]; } /// Plane wave coefficients of |beta> projectors for a chunk of atoms. matrix<double_complex>& pw_coeffs_a() { return pw_coeffs_a_; } /// Calculate inner product between beta-projectors and wave-functions. /** The following is computed: <beta|phi> */ template <typename T> inline matrix<T> inner(int chunk__, Wave_functions& phi__, int ispn__, int idx0__, int n__) { PROFILE("sirius::Beta_projectors_base::inner"); assert(num_gkvec_loc() == phi__.pw_coeffs(ispn__).num_rows_loc()); int nbeta = chunk(chunk__).num_beta_; static_assert(std::is_same<T, double_complex>::value || std::is_same<T, double>::value, "wrong type"); int tsz = std::is_same<T, double_complex>::value ? 
2 : 1; auto& buf = beta_phi_shared(tsz * nbeta * n__, ctx_.dual_memory_t()); matrix<T> beta_phi; switch (ctx_.processing_unit()) { case CPU: { beta_phi = matrix<T>(reinterpret_cast<T*>(buf.template at<CPU>()), nbeta, n__); break; } case GPU: { beta_phi = matrix<T>(reinterpret_cast<T*>(buf.template at<CPU>()), reinterpret_cast<T*>(buf.template at<GPU>()), nbeta, n__); break; } } if (std::is_same<T, double_complex>::value) { switch (ctx_.processing_unit()) { case CPU: { /* compute <beta|phi> */ linalg<CPU>::gemm(2, 0, nbeta, n__, num_gkvec_loc(), pw_coeffs_a().template at<CPU>(), num_gkvec_loc(), phi__.pw_coeffs(ispn__).prime().at<CPU>(0, idx0__), phi__.pw_coeffs(ispn__).prime().ld(), reinterpret_cast<double_complex*>(beta_phi.template at<CPU>()), nbeta); break; } case GPU: { #ifdef __GPU linalg<GPU>::gemm(2, 0, nbeta, n__, num_gkvec_loc(), pw_coeffs_a().template at<GPU>(), num_gkvec_loc(), phi__.pw_coeffs(ispn__).prime().at<GPU>(0, idx0__), phi__.pw_coeffs(ispn__).prime().ld(), reinterpret_cast<double_complex*>(beta_phi.template at<GPU>()), nbeta); beta_phi.template copy<memory_t::device, memory_t::host>(); #else TERMINATE_NO_GPU #endif break; } } } if (std::is_same<T, double>::value) { double a{2}; double a1{-1}; double b{0}; switch (ctx_.processing_unit()) { case CPU: { /* compute <beta|phi> */ linalg<CPU>::gemm(2, 0, nbeta, n__, 2 * num_gkvec_loc(), a, reinterpret_cast<double*>(pw_coeffs_a().template at<CPU>()), 2 * num_gkvec_loc(), reinterpret_cast<double*>(phi__.pw_coeffs(ispn__).prime().at<CPU>(0, idx0__)), 2 * phi__.pw_coeffs(ispn__).prime().ld(), b, reinterpret_cast<double*>(beta_phi.template at<CPU>()), nbeta); if (gkvec_.comm().rank() == 0) { /* subtract one extra G=0 contribution */ linalg<CPU>::ger(nbeta, n__, a1, reinterpret_cast<double*>(pw_coeffs_a().template at<CPU>()), 2 * num_gkvec_loc(), reinterpret_cast<double*>(phi__.pw_coeffs(ispn__).prime().at<CPU>(0, idx0__)), 2 * phi__.pw_coeffs(ispn__).prime().ld(), 
reinterpret_cast<double*>(beta_phi.template at<CPU>()), nbeta); } break; } case GPU: { #ifdef __GPU linalg<GPU>::gemm(2, 0, nbeta, n__, 2 * num_gkvec_loc(), &a, reinterpret_cast<double*>(pw_coeffs_a().template at<GPU>()), 2 * num_gkvec_loc(), reinterpret_cast<double*>(phi__.pw_coeffs(ispn__).prime().at<GPU>(0, idx0__)), 2 * phi__.pw_coeffs(ispn__).prime().ld(), &b, reinterpret_cast<double*>(beta_phi.template at<GPU>()), nbeta); if (gkvec_.comm().rank() == 0) { /* subtract one extra G=0 contribution */ linalg<GPU>::ger(nbeta, n__, &a1, reinterpret_cast<double*>(pw_coeffs_a().template at<GPU>()), 2 * num_gkvec_loc(), reinterpret_cast<double*>(phi__.pw_coeffs(ispn__).prime().template at<GPU>(0, idx0__)), 2 * phi__.pw_coeffs(ispn__).prime().ld(), reinterpret_cast<double*>(beta_phi.template at<GPU>()), nbeta); } beta_phi.template copy<memory_t::device, memory_t::host>(); #else TERMINATE_NO_GPU #endif break; } } } gkvec_.comm().allreduce(beta_phi.template at<CPU>(), static_cast<int>(beta_phi.size())); if (ctx_.processing_unit() == GPU) { beta_phi.template copy<memory_t::host, memory_t::device>(); } return std::move(beta_phi); } /// Generate beta-projectors for a chunk of atoms. 
void generate(int ichunk__, int j__) { PROFILE("sirius::Beta_projectors_base::generate"); auto& pw_coeffs = pw_coeffs_a(); switch (ctx_.processing_unit()) { case CPU: { #pragma omp for for (int i = 0; i < chunk(ichunk__).num_atoms_; i++) { int ia = chunk(ichunk__).desc_(beta_desc_idx::ia, i); double phase = twopi * dot(gkvec_.vk(), ctx_.unit_cell().atom(ia).position()); double_complex phase_k = std::exp(double_complex(0.0, phase)); std::vector<double_complex> phase_gk(num_gkvec_loc()); for (int igk_loc = 0; igk_loc < num_gkvec_loc(); igk_loc++) { auto G = gkvec_.gvec(igk_[igk_loc]); /* total phase e^{i(G+k)r_{\alpha}} */ phase_gk[igk_loc] = std::conj(ctx_.gvec_phase_factor(G, ia) * phase_k); } for (int xi = 0; xi < chunk(ichunk__).desc_(beta_desc_idx::nbf, i); xi++) { for (int igk_loc = 0; igk_loc < num_gkvec_loc(); igk_loc++) { pw_coeffs(igk_loc, chunk(ichunk__).desc_(beta_desc_idx::offset, i) + xi) = pw_coeffs_t_[j__](igk_loc, chunk(ichunk__).desc_(beta_desc_idx::offset_t, i) + xi) * phase_gk[igk_loc]; } } } break; } case GPU: { #ifdef __GPU auto& desc = chunk(ichunk__).desc_; create_beta_gk_gpu(chunk(ichunk__).num_atoms_, num_gkvec_loc(), desc.template at<GPU>(), pw_coeffs_t_[j__].template at<GPU>(), gkvec_coord_.template at<GPU>(), chunk(ichunk__).atom_pos_.template at<GPU>(), pw_coeffs.template at<GPU>()); #endif break; } } } void prepare() { PROFILE("sirius::Beta_projectors_base::prepare"); auto& buf = pw_coeffs_a_shared(num_gkvec_loc() * max_num_beta(), ctx_.dual_memory_t()); switch (ctx_.processing_unit()) { case CPU: { pw_coeffs_a_ = matrix<double_complex>(buf.template at<CPU>(), num_gkvec_loc(), max_num_beta()); break; } case GPU: { pw_coeffs_a_ = matrix<double_complex>(buf.template at<CPU>(), buf.template at<GPU>(), num_gkvec_loc(), max_num_beta()); break; } } if (ctx_.processing_unit() == GPU && reallocate_pw_coeffs_t_on_gpu_) { for (int i = 0; i < N; i++) { pw_coeffs_t_[i].allocate(memory_t::device); pw_coeffs_t_[i].template copy<memory_t::host, 
memory_t::device>(); } } } void dismiss() { PROFILE("sirius::Beta_projectors_base::dismiss"); if (ctx_.processing_unit() == GPU && reallocate_pw_coeffs_t_on_gpu_) { for (int i = 0; i < N; i++) { pw_coeffs_t_[i].deallocate(memory_t::device); } } } static void cleanup() { beta_phi_shared(0, memory_t::host | memory_t::device) = mdarray<double, 1>(); pw_coeffs_a_shared(0, memory_t::host|memory_t::device) = mdarray<double_complex, 1>(); } inline int num_beta_t() const { return num_beta_t_; } inline int num_chunks() const { return static_cast<int>(beta_chunks_.size()); } inline beta_chunk_t const& chunk(int idx__) const { return beta_chunks_[idx__]; } inline int max_num_beta() const { return max_num_beta_; } }; } // namespace #endif
pdgetrf_rectil.c
/** * * @file pdgetrf_rectil.c * * PLASMA auxiliary routines * PLASMA is a software package provided by Univ. of Tennessee, * Univ. of California Berkeley and Univ. of Colorado Denver * * LU with Partial pivoting. * * @version 2.6.0 * @author Mathieu Faverge * @author Hatem Ltaief * @date 2009-11-15 * * @generated d Tue Jan 7 11:45:13 2014 * **/ #include "common.h" void CORE_dgetrf_rectil_init(void); #define PARALLEL_KERNEL #define A(m,n) BLKADDR(A, double, m, n) #define IPIV(k) &(IPIV[(int64_t)A.mb*(int64_t)(k)]) /***************************************************************************//** * Parallel tile LU factorization - dynamic scheduling - Right looking **/ void plasma_pdgetrf_rectil_quark(PLASMA_desc A, int *IPIV) { int k, m, n; int tempk, tempm, tempkm, tempkn, tempmm, tempnn; int ldak, ldam; double zone = (double)1.0; double mzone = (double)-1.0; void * fakedep; /* How many threads per panel? Probably needs to be adjusted during factorization. */ CORE_dgetrf_rectil_init(); for (k = 0; k < min(A.mt, A.nt); k++) { tempk = k * A.mb; tempm = A.m - tempk; tempkm = k == A.mt-1 ? tempm : A.mb; tempkn = k == A.nt-1 ? A.n-k*A.nb : A.nb; ldak = BLKLDD(A, k); double *dA = A(k, k); int *dB = IPIV(k); PLASMA_desc pDesc = plasma_desc_submatrix(A, tempk, k*A.nb, tempm, tempkn); #pragma omp task depend(inout:dA[0:A.mb*A.nb]) depend(out:dB[0:pDesc.n]) { int info[3]; info[1] = 0; info[2] = 1; CORE_dgetrf_rectil( pDesc, dB, info ); } /* * Update the trailing submatrix */ fakedep = (void *)(intptr_t)(k+1); for (n = k+1; n < A.nt; n++) { /* * Apply row interchange after the panel (work on the panel) */ tempnn = n == A.nt-1 ? A.n-n*A.nb : A.nb; PLASMA_desc descA = plasma_desc_submatrix(A, tempk, n*A.nb, tempm, tempnn); double *dA = A(k, n); double *dB = A(k, k); int *dipiv = IPIV(k); #pragma omp task depend(inout:dA[0:1]) depend(in:dB[0:ldak], dipiv[0:tempkm]) CORE_dswptr_ontile(descA, 1, tempkm, dipiv, 1, dB, ldak); m = k+1; if ( m < A.mt ) { tempmm = m == A.mt-1 ? 
A.m-m*A.mb : A.mb; ldam = BLKLDD(A, m); double *dA = A(m , k); double *dB = A(k , n); double *dC = A(m , n); #pragma omp task depend(in:dA[0:A.mb*A.mb], dB[0:A.mb*A.mb]) depend(inout:dC[0:A.mb*A.mb]) cblas_dgemm(CblasColMajor, (CBLAS_TRANSPOSE)PlasmaNoTrans, (CBLAS_TRANSPOSE)PlasmaNoTrans, tempmm, tempnn, A.nb, mzone, dA, ldam, dB, ldak, zone, dC, ldam); for (m = k+2; m < A.mt; m++) { tempmm = m == A.mt-1 ? A.m-m*A.mb : A.mb; ldam = BLKLDD(A, m); double *dA = A(m , k); double *dB = A(k , n); double *dC = A(m , n); double *fake1 = A(k+1, n); double *fake2 = (double *)fakedep; #if defined(KLANG_VERSION) && defined(KASTOR_USE_CW) #warning "KLANG EXTENSION USED" #pragma omp task depend(in:dA[0:A.mb*A.mb], dB[0:A.mb*A.mb], fake2[0:1]) depend(inout:dC[0:A.mb*A.mb]), depend(cw:fake1[0:A.mb*A.nb]) cblas_dgemm(CblasColMajor, (CBLAS_TRANSPOSE)PlasmaNoTrans, (CBLAS_TRANSPOSE)PlasmaNoTrans, tempmm, tempnn, A.nb, mzone, dA, ldam, dB, ldak, zone, dC, ldam); #else #pragma omp task depend(in:dA[0:A.mb*A.mb], dB[0:A.mb*A.mb], fake2[0:1]) depend(inout:dC[0:A.mb*A.mb], fake1[0:A.mb*A.nb]) cblas_dgemm(CblasColMajor, (CBLAS_TRANSPOSE)PlasmaNoTrans, (CBLAS_TRANSPOSE)PlasmaNoTrans, tempmm, tempnn, A.nb, mzone, dA, ldam, dB, ldak, zone, dC, ldam); #endif } } } } for (k = 0; k < min(A.mt, A.nt); k++) { int mintmp; tempk = k * A.mb; tempm = A.m - tempk; tempkm = k == A.mt-1 ? tempm : A.mb; tempkn = k == A.nt-1 ? A.n - k * A.nb : A.nb; mintmp = min(tempkm, tempkn); ldak = BLKLDD(A, k); /* * Apply row interchange behind the panel (work on the panel) */ fakedep = (void*)(intptr_t)k; for (n = 0; n < k; n++) { tempnn = n == A.nt-1 ? 
A.n-n*A.nb : A.nb; double *Aij = A(k, n); double *prevSwap = A(k-1, n); int *dipiv = IPIV(k); PLASMA_desc descA = plasma_desc_submatrix(A, tempk, n*A.nb, tempm, tempnn); #if defined(KLANG_VERSION) && defined(KASTOR_USE_CW) #warning "KLANG EXTENSION USED" #pragma omp task depend(inout:Aij[0:1]) depend(cw:fakedep) depend(in:dipiv[0:mintmp], prevSwap[0:A.lm*A.nb]) CORE_dlaswp_ontile(descA, 1, mintmp, dipiv, 1); #else #pragma omp task depend(inout:Aij[0:1],fakedep) depend(in:dipiv[0:mintmp], prevSwap[0:A.lm*A.nb]) CORE_dlaswp_ontile(descA, 1, mintmp, dipiv, 1); #endif } } }
BCSR.h
/** * @author : Zhao Chonyyao (cyzhao@zju.edu.cn) * @date : 2021-04-30 * @description: Block Compressed Row Format (BCSR) for sparse matrix. * @version : 1.0 */ #ifndef PhysIKA_BCSR #define PhysIKA_BCSR #include <map> #include <omp.h> #include <vector> #include <Eigen/Dense> #include <Eigen/Sparse> #include "error.h" #include "DEFINE_TYPE.h" template <typename mat_type> using VEC_MAT = std::vector<mat_type, Eigen::aligned_allocator<mat_type>>; namespace PhysIKA { /** * block compressed row format for sparse matrix, for more effective computing. * */ template <typename T, const size_t block_size> class BCSR { public: using ele_type = Eigen::Matrix<T, block_size, block_size>; using VEC = Eigen::Matrix<T, -1, 1>; public: BCSR() : rowsb_(0), colsb_(0), rows_(0), cols_(0), size_(0) {} BCSR(int rowsb, int colsb) : rowsb_(rowsb), colsb_(colsb), rows_(rowsb * block_size), cols_(colsb * block_size), size_(cols_ * rows_) {} BCSR(const BCSR<T, block_size>& other) : rowsb_(other.rowsb()), colsb_(other.colsb()), rows_(other.rows()), cols_(other.cols()), size_(other.size()), value_(other.get_value()), offset_(other.get_offset()), index_(other.get_index()) {} // BCSR(const BCSR<T, block_size>&& other) // :rowsb_(other.rowsb()), colsb_(other.colsb()), rows_(other.rows()),cols_(other.cols()),size_(other.size()), value_(other.get_value()), offset_(other.get_offset()), index_(other.get_index()){} public: void setFromEigenMatrix(const Eigen::SparseMatrix<T, Eigen::RowMajor>& A); inline void clear() { value_.clear(); offset_.clear(); index_.clear(); rowsb_ = colsb_ = rows_ = cols_ = size_ = 0; } inline size_t rows() const { return rows_; } inline size_t cols() const { return cols_; } inline size_t size() const { return size_; } inline size_t rowsb() const { return rowsb_; } inline size_t colsb() const { return colsb_; } inline VEC_MAT<ele_type> get_value() const { return value_; } inline std::vector<size_t> get_offset() const { return offset_; } inline std::vector<size_t> get_index() 
const { return index_; } // operators override. public: VEC operator*(const VEC& rhs) const; BCSR<T, block_size> operator*(const BCSR<T, block_size>& rhs) const; inline std::vector<ele_type, Eigen::aligned_allocator<ele_type>> get_diagonal() const; //TODO: move this value_ to protected VEC_MAT<ele_type> value_; protected: std::vector<size_t> offset_; std::vector<size_t> index_; // number of rows and cols per block size_t rowsb_, colsb_; // number of rows and cols for the matrix. size_t rows_, cols_, size_; }; template <typename T> VEC_MAT<MAT3<T>> get_block_diagonal(const SPM_R<T>& A); } // namespace PhysIKA //////////////////////////////////////////////////////////////////////// // template implementation // //////////////////////////////////////////////////////////////////////// namespace PhysIKA { template <typename T, const size_t block_size> void BCSR<T, block_size>::setFromEigenMatrix( const Eigen::SparseMatrix<T, Eigen::RowMajor>& A) { clear(); error_msg_ext_cond( A.rows() % block_size != 0 || A.cols() % block_size != 0, "Convert Eigen Matrix to BCSR failed. Since the Matrix's dim cannot " "be divided by block_size. A-->(%lu, %lu) and block_size: %lu", A.rows(), A.cols(), block_size); rows_ = A.rows(); cols_ = A.cols(); size_ = A.size(); rowsb_ = rows_ / block_size; colsb_ = cols_ / block_size; // very simple version. 
std::map< size_t, ele_type, std::less<size_t>, Eigen::aligned_allocator<std::pair<const size_t, ele_type>>> coos; for (size_t i = 0; i < A.outerSize(); ++i) { for (typename Eigen::SparseMatrix<T, Eigen::RowMajor>::InnerIterator iter( A, i); iter; ++iter) { int rid = iter.row() / block_size, cid = iter.col() / block_size; int bid = rid * colsb_ + cid; if (coos.count(bid) == 0) { coos.insert({ bid, ele_type::Zero() }); } coos[bid]( iter.row() - rid * block_size, iter.col() - cid * block_size) = iter.value(); } } // coo to csr size_t num = 0, last_rid = -1; for (const auto& item : coos) { size_t rid = item.first / colsb_; size_t cid = item.first % colsb_; value_.emplace_back(item.second); index_.emplace_back(cid); if (rid != last_rid) { offset_.emplace_back(num); last_rid = rid; } num++; } offset_.emplace_back(num); } // operators override. // explicit name VEC // TODO: optimize for eigen template <typename T, const size_t block_size> Eigen::Matrix<T, -1, 1> BCSR<T, block_size>::operator*(const VEC& rhs) const { // error_msg_ext_cond( // rhs.rows() != cols_, "BCSR<%lu, %lu>. rhs<%lu>. dim does not match. ", // rows_, cols_, rhs.rows()); VEC res(rows_); res.setZero(); #pragma omp parallel for for (size_t i = 0; i < rowsb_; ++i) { for (size_t j = offset_[i]; j < offset_[i + 1]; ++j) { size_t k = index_[j]; res.segment(i * block_size, block_size).noalias() += value_[j] * rhs.segment(k * block_size, block_size); } } return std::move(res); } template <typename T, const size_t block_size> BCSR<T, block_size> BCSR<T, block_size>::operator*(const BCSR<T, block_size>& rhs) const { // TODO return BCSR<T, block_size>(); } // explicit name ele_type template <typename T, const size_t block_size> std::vector< Eigen::Matrix<T, block_size, block_size>, Eigen::aligned_allocator<Eigen::Matrix<T, block_size, block_size>>> BCSR<T, block_size>::get_diagonal() const { std::vector<ele_type, Eigen::aligned_allocator<ele_type>> block_diag; // very simple version. 
for (size_t i = 0; i < rowsb_; ++i) { size_t lid = offset_[i], rid = offset_[i + 1]; while (true) { size_t mid = (lid + rid) / 2; size_t cid = index_[mid]; if (cid == i) { block_diag.emplace_back(value_[mid]); break; } else if (cid < i) { lid = mid + 1; } else { rid = mid; } if (lid >= rid) { block_diag.emplace_back(ele_type::Zero()); break; } } } return std::move(block_diag); } template <typename T> VEC_MAT<MAT3<T>> get_block_diagonal(const SPM_R<T>& A) { exit_if(A.rows() != A.cols(), "A should be sysmetric."); VEC_MAT<MAT3<T>> diag_A(A.rows() / 3, MAT3<T>::Zero()); auto fill_one_dim = [&](const size_t offset) -> void { #pragma omp parallel for for (size_t i = offset; i < A.outerSize(); i += 3) { const size_t vert_id = i / 3; const size_t first_index = i - offset; MAT3<T>& A_i = diag_A[vert_id]; for (typename SPM_R<T>::InnerIterator it(A, i); it; ++it) { if (it.index() >= first_index) { size_t diff = it.index() - first_index; for (size_t j = 0; j < diff; ++j) { A_i(offset, j) = 0.0; } A_i(offset, diff) = it.value(); const size_t left = 2 - diff; bool if_advance = true; for (size_t j = 0; j < left; ++j) { if (if_advance) { ++it; ++diff; } if (it && it.index() == first_index + diff) { A_i(offset, diff) = it.value(); if_advance = true; } else { A_i(offset, diff) = 0.0; if_advance = false; } } break; } } } }; fill_one_dim(0); fill_one_dim(1); fill_one_dim(2); return diag_A; } } // namespace PhysIKA #endif
main.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> void oddEvenSort (int *a, int N); int main() { int N, i; printf("Inserire il numero degli elementi del vettore da ordinare: "); scanf("%d", &N); int *a = (int *)malloc(N*sizeof(int)); for(i = 0; i < N; i++) { printf("Inserire elemento [%d] nel vettore: ", i); scanf("%d", &a[i]); } oddEvenSort(a,N); printf("\nVettore ordinato: "); for(i = 0; i < N; i++) { printf("%d\t", a[i]); } return 0; } void oddEvenSort (int *a, int N) { int sw1 = 1, start = 0, i; int temp; while(sw1 || start) { sw1 = 0; #pragma omp parallel for private(temp) for(i = start; i < N - 1; i += 2) //Ciclo for sugli elementi di indice pari. { if(a[i] > a[i+1]) { temp = a[i]; a[i] = a[i+1]; a[i+1] = temp; sw1 = 1; } } if(start == 0) { start = 1; } else start = 0; } }
profiler_interface.h
/* # ============================================================================= # Copyright (c) 2016 - 2021 Blue Brain Project/EPFL # # See top-level LICENSE file for details. # ============================================================================= */ #pragma once #include <initializer_list> #include <type_traits> #if defined(NRN_CALIPER) #include <caliper/cali.h> #endif #if defined(CRAYPAT) #include <pat_api.h> #endif #if defined(TAU) #include <TAU.h> #endif #if defined(LIKWID_PERFMON) #include <likwid.h> #endif namespace nrn { namespace detail { /*! \class Instrumentor * \brief Instrumentation infrastructure for benchmarking and profiling. * * The Instrumentor class exposes static methods that can be used to * toggle with fine-grained resolution the profiling of specific * areas within the code. */ template <class... TProfilerImpl> struct Instrumentor { #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wunused-value" /*! \fn phase_begin * \brief Activate the collection of profiling data within a code region. * * This function semantically defines the beginning of a region * of code that the user wishes to profile. * Loops through all enabled profilers and calls the relevant * `phase_begin` function. * This function should have a non-empty implementation only for * profilers that allow multiple code regions with different names * to be profiled concurrently. * * @param name the (unique) identifier of the code region to be profiled */ inline static void phase_begin(const char* name) { std::initializer_list<int>{(TProfilerImpl::phase_begin(name), 0)...}; } /*! \fn phase_end * \brief Deactivate the collection of profiling data within a code region. * * This function semantically defines the end of a region * of code that the user wishes to profile. * Loops through all enabled profilers and calls the relevant * `phase_end` function. 
* This function should have a non-empty implementation only for * profilers that allow multiple code regions with different names * to be profiled concurrently. * * @param name the (unique) identifier of the code region to be profiled */ inline static void phase_end(const char* name) { std::initializer_list<int>{(TProfilerImpl::phase_end(name), 0)...}; } /*! \fn start_profile * \brief Globally activate the collection of profiling data. * * Activate the collection of profiler data without defining * a region of interest with a given name, as opposed to `phase_begin`. * Loops through all enabled profilers and calls the relevant * `start_profile` function. * This function should have a non-empty implementation only for * profilers that expose simply a global begin/end interface, without * named regions. */ inline static void start_profile() { std::initializer_list<int>{(TProfilerImpl::start_profile(), 0)...}; } /*! \fn stop_profile * \brief Globally deactivate the collection of profiling data. * * Deactivate the collection of profiler data without defining * a region of interest with a given name, as opposed to `phase_end`. * Loops through all enabled profilers and calls the relevant * `stop_profile` function. * This function should have a non-empty implementation only for * profilers that expose simply a global begin/end interface, without * named regions. */ inline static void stop_profile() { std::initializer_list<int>{(TProfilerImpl::stop_profile(), 0)...}; } /*! \fn init_profile * \brief Initialize the profiler. * * Initialize a profiler's internal structure, without activating yet * any data collection, similar in concept to MPI_Init. * Loops through all enabled profilers and calls the relevant * `init_profile` function. * This function should have a non-empty implementation only for * profilers that require special initialization, typically before * any memory allocation is done. 
*/ inline static void init_profile() { std::initializer_list<int>{(TProfilerImpl::init_profile(), 0)...}; } /*! \fn finalize_profile * \brief Finalize the profiler. * * Finalize a profiler's internal structure, without activating yet * any data collection, similar in concept to MPI_Finalize. * Loops through all enabled profilers and calls the relevant * `finalize_profile` function. * This function should have a non-empty implementation only for * profilers that require special finalization. */ inline static void finalize_profile() { std::initializer_list<int>{(TProfilerImpl::finalize_profile(), 0)...}; } #pragma clang diagnostic pop }; #if defined(NRN_CALIPER) struct Caliper { inline static void phase_begin(const char* name) { CALI_MARK_BEGIN(name); }; inline static void phase_end(const char* name) { CALI_MARK_END(name); }; inline static void start_profile(){}; inline static void stop_profile(){}; inline static void init_profile(){}; inline static void finalize_profile(){}; }; #endif #if defined(CRAYPAT) struct CrayPat { inline static void phase_begin(const char* name){}; inline static void phase_end(const char* name){}; inline static void start_profile() { PAT_record(PAT_STATE_ON); }; inline static void stop_profile() { PAT_record(PAT_STATE_OFF); }; inline static void init_profile(){}; inline static void finalize_profile(){}; }; #endif #if defined(TAU) struct Tau { inline static void phase_begin(const char* name){}; inline static void phase_end(const char* name){}; inline static void start_profile() { TAU_ENABLE_INSTRUMENTATION(); }; inline static void stop_profile() { TAU_DISABLE_INSTRUMENTATION(); }; inline static void init_profile(){}; inline static void finalize_profile(){}; }; #endif #if defined(LIKWID_PERFMON) struct Likwid { inline static void phase_begin(const char* name) { LIKWID_MARKER_START(name); }; inline static void phase_end(const char* name) { LIKWID_MARKER_STOP(name); }; inline static void start_profile(){}; inline static void stop_profile(){}; 
inline static void init_profile() { LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; } }; inline static void finalize_profile() { LIKWID_MARKER_CLOSE; }; }; #endif struct NullInstrumentor { inline static void phase_begin(const char* name){}; inline static void phase_end(const char* name){}; inline static void start_profile(){}; inline static void stop_profile(){}; inline static void init_profile(){}; inline static void finalize_profile(){}; }; using InstrumentorImpl = detail::Instrumentor< #if defined NRN_CALIPER detail::Caliper, #endif #if defined(CRAYPAT) detail::CrayPat, #endif #if defined(TAU) detail::Tau, #endif #if defined(LIKWID_PERFMON) detail::Likwid, #endif detail::NullInstrumentor>; } // namespace detail namespace Instrumentor { struct phase { const char* phase_name; phase(const char* name) : phase_name(name) { detail::InstrumentorImpl::phase_begin(phase_name); } ~phase() { detail::InstrumentorImpl::phase_end(phase_name); } }; inline static void start_profile() { detail::InstrumentorImpl::start_profile(); } inline static void stop_profile() { detail::InstrumentorImpl::stop_profile(); } inline static void phase_begin(const char* name) { detail::InstrumentorImpl::phase_begin(name); } inline static void phase_end(const char* name) { detail::InstrumentorImpl::phase_end(name); } inline static void init_profile() { detail::InstrumentorImpl::init_profile(); } inline static void finalize_profile() { detail::InstrumentorImpl::finalize_profile(); } } // namespace Instrumentor } // namespace nrn
pr71758.c
/* PR middle-end/71758 */ void foo (int *p) { long long i = 0; #pragma omp target device (i) ; #pragma omp target update device (i) to (p[0]) }
solver.h
#pragma once #include <fgpl/src/broadcast.h> #include <fgpl/src/dist_hash_map.h> #include <fgpl/src/dist_hash_set.h> #include <fgpl/src/dist_range.h> #include <fgpl/src/hash_set.h> #include <fgpl/src/reducer.h> #include <hps/src/hps.h> #include <omp_hash_map/src/omp_hash_map.h> #include <omp_hash_map/src/omp_hash_set.h> #include <cmath> #include <cstdlib> #include <functional> #include <numeric> #include <unordered_set> #include <queue> #include "../config.h" #include "../det/det.h" #include "../math_vector.h" #include "../parallel.h" #include "../result.h" #include "../timer.h" #include "../util.h" #include "davidson.h" #include "green.h" #include "hamiltonian.h" #include "hc_server.h" #include "uncert_result.h" template <class S> class Solver { public: void run(); private: S system; Hamiltonian<S> hamiltonian; std::vector<double> eps_tried_prev; fgpl::HashSet<Det, DetHasher> var_dets; size_t pt_mem_avail; size_t var_iteration_global; double eps_var_min; double eps_pt; double eps_pt_dtm; double eps_pt_psto; double target_error; size_t bytes_per_det; void run_all_variations(); void run_variation(const double eps_var, const bool until_converged = true); void run_all_perturbations(); void run_perturbation(const double eps_var); double get_energy_pt_dtm(const double eps_var); UncertResult get_energy_pt_psto(const double eps_var, const double energy_pt_dtm); UncertResult get_energy_pt_sto(const double eps_var, const UncertResult& get_energy_pt_sto); bool load_variation_result(const std::string& filename); void save_variation_result(const std::string& filename); void save_pair_contrib(const double eps_var); void print_dets_info() const; std::string get_wf_filename(const double eps_var) const; template <class C> std::array<double, 2> mapreduce_sum( const fgpl::DistHashMap<Det, C, DetHasher>& map, const std::function<double(const Det& det, const C& hc_sum)>& mapper) const; }; template <class S> void Solver<S>::run() { Timer::start("setup"); std::setlocale(LC_ALL, 
"en_US.UTF-8"); system.setup(); target_error = Config::get<double>("target_error", 5.0e-5); Result::put("energy_hf", system.energy_hf); Timer::end(); std::vector<std::vector<size_t>> connections; if (!Config::get<bool>("skip_var", false)) { Timer::start("variation"); run_all_variations(); if (Config::get<bool>("2rdm", false) || Config::get<bool>("get_2rdm_csv", false)) { connections = hamiltonian.matrix.get_connections(); } hamiltonian.clear(); Timer::end(); } Timer::start("post variation"); if (Config::get<bool>("hc_server_mode", false)) { if (system.time_sym) throw std::invalid_argument("time sym hc server not implemented"); const auto& wf_filename = get_wf_filename(eps_var_min); if (!load_variation_result(wf_filename)) throw std::runtime_error("failed to load wf"); hamiltonian.update(system); HcServer<S> server(system, hamiltonian); server.run(); return; } if (Config::get<bool>("get_green", false)) { if (system.time_sym) throw std::invalid_argument("time sym green not implemented"); Timer::start("green"); Green<S> green(system, hamiltonian); green.run(); Timer::end(); } system.post_variation(connections); connections.clear(); connections.shrink_to_fit(); hamiltonian.clear(); eps_tried_prev.clear(); var_dets.clear_and_shrink(); Timer::end(); if (Config::get<bool>("var_only", false)) return; Timer::start("perturbation"); run_all_perturbations(); system.post_perturbation(); Timer::end(); } template <class S> void Solver<S>::run_all_variations() { if (Parallel::is_master()) { printf("Final iteration 0 HF ndets= 1 energy= %.8f\n", system.energy_hf); } const auto& eps_vars = Config::get<std::vector<double>>("eps_vars"); const auto& eps_vars_schedule = Config::get<std::vector<double>>("eps_vars_schedule"); double eps_var_prev = Util::INF; for (const auto& det : system.dets) var_dets.set(det); auto it_schedule = eps_vars_schedule.begin(); var_iteration_global = 0; eps_var_min = eps_vars.back(); const bool get_pair_contrib = Config::get<bool>("get_pair_contrib", false); 
  // Walk the eps schedule from loosest to tightest; each eps starts from
  // the previous wavefunction.
  for (const double eps_var : eps_vars) {
    Timer::start(Util::str_printf("eps_var %#.2e", eps_var));
    const auto& filename = get_wf_filename(eps_var);
    if (Config::get<bool>("force_var", false) || !load_variation_result(filename)) {
      // Perform extra scheduled eps.
      while (it_schedule != eps_vars_schedule.end() && *it_schedule >= eps_var_prev)
        it_schedule++;
      while (it_schedule != eps_vars_schedule.end() && *it_schedule > eps_var) {
        const double eps_var_extra = *it_schedule;
        Timer::start(Util::str_printf("extra %#.2e", eps_var_extra));
        run_variation(eps_var_extra, false);
        Timer::end();
        it_schedule++;
      }
      Timer::start("main");
      run_variation(eps_var);
      Result::put<double>(Util::str_printf("energy_var/%#.2e", eps_var), system.energy_var);
      Timer::end();
      save_variation_result(filename);
    } else {
      // Loaded from disk: rebuild the variational det set for later stages.
      eps_tried_prev.clear();
      var_dets.clear();
      for (const auto& det : system.dets) var_dets.set(det);
      // hamiltonian.clear();
      Result::put<double>(Util::str_printf("energy_var/%#.2e", eps_var), system.energy_var);
    }
    if (Parallel::is_master() && get_pair_contrib) {
      save_pair_contrib(eps_var);
    }
    eps_var_prev = eps_var;
    Timer::end();
  }
  // hamiltonian.clear();
  eps_tried_prev.clear();
  eps_tried_prev.shrink_to_fit();
  var_dets.clear_and_shrink();
}

// Runs the perturbation stage for every eps in "eps_vars".
template <class S>
void Solver<S>::run_all_perturbations() {
  const auto& eps_vars = Config::get<std::vector<double>>("eps_vars");
  bytes_per_det = N_CHUNKS * 16;
#ifdef INF_ORBS
  bytes_per_det += 128;
#endif
  for (const double eps_var : eps_vars) {
    Timer::start(Util::str_printf("eps_var %#.2e", eps_var));
    run_perturbation(eps_var);
    Timer::end();
  }
}

// One variational optimization at a fixed eps_var: alternately expand the
// det space (importance-selected connected dets) and rediagonalize, until
// the energy and the det count stop changing.
template <class S>
void Solver<S>::run_variation(const double eps_var, const bool until_converged) {
  Davidson davidson;
  fgpl::DistHashSet<Det, DetHasher> dist_new_dets;
  size_t n_dets = system.get_n_dets();
  size_t n_dets_new = n_dets;
  double energy_var_prev = 0.0;
  bool converged = false;
  size_t iteration = 0;
  bool dets_converged = false;
  const bool get_pair_contrib = Config::get<bool>("get_pair_contrib", false);
  bool var_sd = Config::get<bool>("var_sd", get_pair_contrib);
  while (!converged) {
    eps_tried_prev.resize(n_dets, Util::INF);
    if (until_converged) Timer::start(Util::str_printf("#%zu", iteration + 1));

    // Random execution and broadcast.
    if (!dets_converged) {
      n_dets_new = n_dets;
      // Five strided passes so progress can be reported at 20% steps.
      for (size_t j = 0; j < 5; j++) {
        fgpl::DistRange<size_t>(j, n_dets, 5).for_each([&](const size_t i) {
          const auto& det = system.dets[i];
          const double coef = system.coefs[i];
          double eps_min = eps_var / std::abs(coef);
          // var_sd keeps all connections of the first (HF) det.
          if (i == 0 && var_sd) eps_min = 0.0;
          if (system.time_sym && det.up != det.dn) eps_min *= Util::SQRT2;
          // Skip dets whose threshold has not tightened since last pass.
          if (eps_min >= eps_tried_prev[i] * 0.999) return;
          Det connected_det_reg;
          const auto& connected_det_handler = [&](const Det& connected_det, const int n_excite) {
            // Canonicalize spin order under time-reversal symmetry.
            connected_det_reg = connected_det;
            if (system.time_sym && connected_det.up > connected_det.dn) {
              connected_det_reg.reverse_spin();
            }
            if (var_dets.has(connected_det_reg)) return;
            if (n_excite == 1) {
              const double h_ai = system.get_hamiltonian_elem(det, connected_det, 1);
              if (std::abs(h_ai) < eps_min) return;  // Filter out small single excitation.
            }
            dist_new_dets.async_set(connected_det_reg);
          };
          system.find_connected_dets(det, eps_tried_prev[i], eps_min, connected_det_handler);
          eps_tried_prev[i] = eps_min;
        });
        dist_new_dets.sync();
        n_dets_new += dist_new_dets.get_n_keys();
        system.dets.reserve(n_dets_new);
        system.coefs.reserve(n_dets_new);
        // New dets enter with a tiny seed coefficient.
        dist_new_dets.for_each_serial([&](const Det& connected_det, const size_t) {
          var_dets.set(connected_det);
          system.dets.push_back(connected_det);
          system.coefs.push_back(1.0e-16);
        });
        dist_new_dets.clear();
        if (Parallel::is_master()) printf("%zu%% ", (j + 1) * 20);
      }
      if (Parallel::is_master()) {
        printf("\nNumber of dets / new dets: %'zu / %'zu\n", n_dets_new, n_dets_new - n_dets);
      }
      Timer::checkpoint("get next det list");
      hamiltonian.update(system);
    }

    // Looser Davidson tolerance for intermediate (non-converging) runs.
    const double davidson_target_error =
        until_converged ? target_error * target_error * 1e-4 : target_error * target_error;
    davidson.diagonalize(
        hamiltonian.matrix, system.coefs, davidson_target_error, Parallel::is_master());
    const double energy_var_new = davidson.get_lowest_eigenvalue();
    system.coefs = davidson.get_lowest_eigenvector();
    Timer::checkpoint("diagonalize sparse hamiltonian");
    var_iteration_global++;
    if (Parallel::is_master()) {
      printf("Iteration %zu ", var_iteration_global);
      printf("eps1= %#.2e ndets= %'zu energy= %.8f\n", eps_var, n_dets_new, energy_var_new);
    }
    // Converged when the energy change is tiny, or when both the det set
    // (growth < 0.1%) and the Davidson solve have converged.
    if (std::abs(energy_var_new - energy_var_prev) < target_error * target_error * 1e-2) {
      converged = true;
    }
    if (n_dets_new < n_dets * 1.001) {
      dets_converged = true;
    }
    if (dets_converged && davidson.converged) {
      converged = true;
    }
    n_dets = n_dets_new;
    energy_var_prev = energy_var_new;
    if (!until_converged) break;
    Timer::end();
    iteration++;
  }
  system.energy_var = energy_var_prev;
  if (Parallel::is_master() && until_converged) {
    printf("Final iteration %zu ", var_iteration_global);
    printf("eps1= %#.2e ndets= %'zu energy= %.8f\n", eps_var, n_dets, system.energy_var);
    print_dets_info();
  }
}

// Determines the PT cutoffs (clamped so eps_pt <= eps_pt_psto <=
// eps_pt_dtm) and runs the three-stage perturbation for one eps_var.
template <class S>
void Solver<S>::run_perturbation(const double eps_var) {
  double default_eps_pt_dtm = 2.0e-6;
  double default_eps_pt_psto = 1.0e-7;
  double default_eps_pt = 1.0e-20;
  if (system.type == SystemType::HEG) {
    default_eps_pt_psto = default_eps_pt_dtm;
    default_eps_pt = eps_var * 1.0e-20;
  }
  eps_pt_dtm = Config::get<double>("eps_pt_dtm", default_eps_pt_dtm);
  eps_pt_psto = Config::get<double>("eps_pt_psto", default_eps_pt_psto);
  eps_pt = Config::get<double>("eps_pt", default_eps_pt);
  if (eps_pt_psto < eps_pt) eps_pt_psto = eps_pt;
  if (eps_pt_dtm < eps_pt_psto) eps_pt_dtm = eps_pt_psto;

  // If result already exists, return.
const auto& value_entry = Util::str_printf("energy_total/%#.2e/%#.2e/value", eps_var, eps_pt); const auto& uncert_entry = Util::str_printf("energy_total/%#.2e/%#.2e/uncert", eps_var, eps_pt); UncertResult res(Result::get<double>(value_entry, 0.0)); if (res.value != 0.0) { if (Parallel::is_master()) { res.uncert = Result::get<double>(uncert_entry, 0.0); printf("Total energy: %s (loaded from result file)\n", res.to_string().c_str()); } if (!Config::get<bool>("force_pt", false)) return; } // Load var wf. const auto& var_filename = get_wf_filename(eps_var); if (!load_variation_result(var_filename)) { throw new std::runtime_error("cannot load variation results"); } system.update_diag_helper(); if (system.time_sym) system.unpack_time_sym(); // Perform multi stage PT. system.dets.shrink_to_fit(); system.coefs.shrink_to_fit(); var_dets.clear_and_shrink(); var_dets.reserve(system.get_n_dets()); for (const auto& det : system.dets) var_dets.set(det); size_t mem_total = Config::get<double>("mem_total", Util::get_mem_total()); #ifdef INF_ORBS mem_total *= 0.8; #endif const size_t mem_var = system.get_n_dets() * (bytes_per_det * 3 + 8); const double mem_left = mem_total * 0.7 - mem_var - system.helper_size; assert(mem_left > 0); pt_mem_avail = mem_left; const size_t n_procs = Parallel::get_n_procs(); if (n_procs >= 2) { pt_mem_avail = static_cast<size_t>(pt_mem_avail * 0.7 * n_procs); } if (Parallel::is_master()) { printf("Memory total: %.1fGB\n", mem_total * 1.0e-9); printf("Helper size: %.1fGB\n", system.helper_size * 1.0e-9); printf("Bytes per det: %zu\n", bytes_per_det); printf("Memory var: %.1fGB\n", mem_var * 1.0e-9); printf("Memory PT limit: %.1fGB\n", pt_mem_avail * 1.0e-9); } const double energy_pt_dtm = get_energy_pt_dtm(eps_var); const UncertResult energy_pt_psto = get_energy_pt_psto(eps_var, energy_pt_dtm); const UncertResult energy_pt = get_energy_pt_sto(eps_var, energy_pt_psto); if (Parallel::is_master()) { printf("Total energy: %s Ha\n", 
energy_pt.to_string().c_str()); } Result::put(value_entry, energy_pt.value); Result::put(uncert_entry, energy_pt.uncert); } template <class S> double Solver<S>::get_energy_pt_dtm(const double eps_var) { if (eps_pt_dtm >= 1.0) return system.energy_var; Timer::start(Util::str_printf("dtm %#.2e", eps_pt_dtm)); const size_t n_var_dets = system.get_n_dets(); size_t n_batches = Config::get<size_t>("n_batches_pt_dtm", 0); fgpl::DistHashMap<Det, MathVector<double, 1>, DetHasher> hc_sums; size_t bytes_per_entry = bytes_per_det + 8; const DetHasher det_hasher; // Estimate best n batches. if (n_batches == 0) { fgpl::DistRange<size_t>(50, n_var_dets, 100).for_each([&](const size_t i) { const Det& det = system.dets[i]; const double coef = system.coefs[i]; const auto& pt_det_handler = [&](const Det& det_a, const int n_excite) { if (var_dets.has(det_a)) return; const size_t det_a_hash = det_hasher(det_a); const size_t batch_hash = Util::rehash(det_a_hash); if ((batch_hash & 127) != 0) return; // use 1st of 16 batches. if (n_excite == 1) { const double h_ai = system.get_hamiltonian_elem(det, det_a, n_excite); const double hc = h_ai * coef; if (std::abs(hc) < eps_pt_dtm) return; // Filter out small single excitation. 
        }
        MathVector<double, 1> contrib;
        hc_sums.async_set(det_a, contrib);
      };
      system.find_connected_dets(det, Util::INF, eps_pt_dtm / std::abs(coef), pt_det_handler);
    });
    hc_sums.sync();
    const size_t n_pt_dets = hc_sums.get_n_keys();
    // Scale the probed count back up (x128 hash mask, x100 det stride,
    // x2.5 safety margin) and divide by the memory budget per entry.
    n_batches = static_cast<size_t>(ceil(2.5 * 128 * 100 * n_pt_dets * bytes_per_entry / pt_mem_avail));
    if (n_batches == 0) n_batches = 1;
    fgpl::broadcast(n_batches);
    if (Parallel::is_master()) {
      printf("Number of dtm batches: %zu\n", n_batches);
    }
    Timer::checkpoint("determine number of dtm batches");
    hc_sums.clear();
  }

  double energy_sum = 0.0;
  double energy_sq_sum = 0.0;
  size_t n_pt_dets_sum = 0;
  UncertResult energy_pt_dtm;
  for (size_t batch_id = 0; batch_id < n_batches; batch_id++) {
    Timer::start(Util::str_printf("#%zu/%zu", batch_id + 1, n_batches));
    // Five strided passes so progress can be reported at 20% steps.
    for (size_t j = 0; j < 5; j++) {
      fgpl::DistRange<size_t>(j, n_var_dets, 5).for_each([&](const size_t i) {
        const Det& det = system.dets[i];
        const double coef = system.coefs[i];
        const auto& pt_det_handler = [&](const Det& det_a, const int n_excite) {
          // Assign each PT det to a batch by rehashed det hash.
          const size_t det_a_hash = det_hasher(det_a);
          const size_t batch_hash = Util::rehash(det_a_hash);
          if (batch_hash % n_batches != batch_id) return;
          if (var_dets.has(det_a)) return;
          const double h_ai = system.get_hamiltonian_elem(det, det_a, n_excite);
          const double hc = h_ai * coef;
          if (std::abs(hc) < eps_pt_dtm) return;  // Filter out small single excitation.
          const MathVector<double, 1> contrib(hc);
          hc_sums.async_set(det_a, contrib, fgpl::Reducer<MathVector<double, 1>>::sum);
        };
        system.find_connected_dets(det, Util::INF, eps_pt_dtm / std::abs(coef), pt_det_handler);
      });
      hc_sums.sync(fgpl::Reducer<MathVector<double, 1>>::sum);
      if (Parallel::is_master()) printf("%zu%% ", (j + 1) * 20);
    }
    const size_t n_pt_dets = hc_sums.get_n_keys();
    if (Parallel::is_master()) {
      printf("\nNumber of dtm pt dets: %'zu\n", n_pt_dets);
    }
    n_pt_dets_sum += n_pt_dets;
    Timer::checkpoint("create hc sums");
    // Second-order PT: (sum_i H_ai c_i)^2 / (E_var - H_aa) per PT det.
    const auto& energy_pt_dtm_batch = mapreduce_sum<MathVector<double, 1>>(
        hc_sums, [&](const Det& det_a, const MathVector<double, 1>& hc_sum) {
          const double H_aa = system.get_hamiltonian_elem(det_a, det_a, 0);
          const double contrib = hc_sum[0] * hc_sum[0] / (system.energy_var - H_aa);
          return contrib;
        });
    energy_sum += energy_pt_dtm_batch[0];
    energy_sq_sum += energy_pt_dtm_batch[1];
    // Extrapolate the full correction from the batches done so far.
    energy_pt_dtm.value = energy_sum / (batch_id + 1) * n_batches;
    if (batch_id == n_batches - 1) {
      energy_pt_dtm.uncert = 0.0;
    } else {
      const double energy_avg = energy_sum / n_pt_dets_sum;
      const double sample_stdev = sqrt(energy_sq_sum / n_pt_dets_sum - energy_avg * energy_avg);
      energy_pt_dtm.uncert =
          sample_stdev * sqrt(n_pt_dets_sum) / (batch_id + 1) * (n_batches - batch_id - 1);
    }
    if (Parallel::is_master()) {
      printf("PT dtm batch correction: " ENERGY_FORMAT "\n", energy_pt_dtm_batch[0]);
      printf("PT dtm correction (eps1= %.2e):", eps_var);
      printf(" %s Ha\n", energy_pt_dtm.to_string().c_str());
      printf("PT dtm total energy (eps1= %.2e):", eps_var);
      printf(" %s Ha\n", (energy_pt_dtm + system.energy_var).to_string().c_str());
      printf("Correlation energy (eps1= %.2e):", eps_var);
      printf(
          " %s Ha\n", (energy_pt_dtm + system.energy_var - system.energy_hf).to_string().c_str());
    }
    hc_sums.clear();
    Timer::end();  // batch
  }
  hc_sums.clear_and_shrink();
  Timer::end();  // dtm
  return energy_pt_dtm.value + system.energy_var;
}

template <class S>
UncertResult
// Pseudo-stochastic PT stage: batched by det hash, corrects the dtm energy
// using contributions with eps_pt_psto <= |hc| < eps_pt_dtm; may exit
// before processing all batches once the uncertainty is small enough.
Solver<S>::get_energy_pt_psto(const double eps_var, const double energy_pt_dtm) {
  if (eps_pt_psto >= eps_pt_dtm) return UncertResult(energy_pt_dtm, 0.0);
  Timer::start(Util::str_printf("psto %#.2e", eps_pt_psto));
  const size_t n_var_dets = system.get_n_dets();
  size_t n_batches = Config::get<size_t>("n_batches_pt_psto", 0);
  fgpl::DistHashMap<Det, MathVector<double, 2>, DetHasher> hc_sums;
  const size_t bytes_per_entry = bytes_per_det + 16;
  const DetHasher det_hasher;

  // Estimate best n batches.
  if (n_batches == 0) {
    // Probe every 100th variational det and ~1/128 of the PT space.
    fgpl::DistRange<size_t>(50, n_var_dets, 100).for_each([&](const size_t i) {
      const Det& det = system.dets[i];
      const double coef = system.coefs[i];
      const auto& pt_det_handler = [&](const Det& det_a, const int n_excite) {
        if (var_dets.has(det_a)) return;
        const size_t det_a_hash = det_hasher(det_a);
        const size_t batch_hash = Util::rehash(det_a_hash);
        if ((batch_hash & 127) != 0) return;  // keep ~1/128 of PT dets for the estimate.
        if (n_excite == 1) {
          const double h_ai = system.get_hamiltonian_elem(det, det_a, n_excite);
          const double hc = h_ai * coef;
          if (std::abs(hc) < eps_pt_psto) return;  // Filter out small single excitation.
        }
        MathVector<double, 2> contrib;
        hc_sums.async_set(det_a, contrib);
      };
      system.find_connected_dets(det, Util::INF, eps_pt_psto / std::abs(coef), pt_det_handler);
    });
    hc_sums.sync();
    const size_t n_pt_dets = hc_sums.get_n_keys();
    const double mem_usage = Config::get<double>("pt_psto_mem_usage", 1.0);
    n_batches = static_cast<size_t>(
        ceil(2.5 * 128 * 100 * n_pt_dets * bytes_per_entry / (pt_mem_avail * mem_usage)));
    // At least 16 batches so early exit remains statistically meaningful.
    if (n_batches < 16) n_batches = 16;
    fgpl::broadcast(n_batches);
    if (Parallel::is_master()) {
      printf("Number of psto batches: %zu\n", n_batches);
    }
    Timer::checkpoint("determine number of psto batches");
    hc_sums.clear();
  }

  double energy_sum = 0.0;
  double energy_sq_sum = 0.0;
  size_t n_pt_dets_sum = 0;
  UncertResult energy_pt_psto;
  for (size_t batch_id = 0; batch_id < n_batches; batch_id++) {
    Timer::start(Util::str_printf("#%zu/%zu", batch_id + 1, n_batches));
    for (size_t j = 0; j < 5; j++) {
      fgpl::DistRange<size_t>(j, n_var_dets, 5).for_each([&](const size_t i) {
        const Det& det = system.dets[i];
        const double coef = system.coefs[i];
        const auto& pt_det_handler = [&](const Det& det_a, const int n_excite) {
          const size_t det_a_hash = det_hasher(det_a);
          const size_t batch_hash = Util::rehash(det_a_hash);
          if (batch_hash % n_batches != batch_id) return;
          if (var_dets.has(det_a)) return;
          const double h_ai = system.get_hamiltonian_elem(det, det_a, n_excite);
          const double hc = h_ai * coef;
          if (std::abs(hc) < eps_pt_psto) return;  // Filter out small single excitation.
          // contrib[0]: all contributions above eps_pt_psto;
          // contrib[1]: only those already counted by the dtm stage.
          MathVector<double, 2> contrib;
          contrib[0] = hc;
          if (std::abs(hc) >= eps_pt_dtm) contrib[1] = hc;
          hc_sums.async_set(det_a, contrib, fgpl::Reducer<MathVector<double, 2>>::sum);
        };
        system.find_connected_dets(det, Util::INF, eps_pt_psto / std::abs(coef), pt_det_handler);
      });
      hc_sums.sync(fgpl::Reducer<MathVector<double, 2>>::sum);
      if (Parallel::is_master()) printf("%zu%% ", (j + 1) * 20);
    }
    const size_t n_pt_dets = hc_sums.get_n_keys();
    if (Parallel::is_master()) {
      printf("\nNumber of psto pt dets: %'zu\n", n_pt_dets);
    }
    n_pt_dets_sum += n_pt_dets;
    Timer::checkpoint("create hc sums");
    // The difference of squared sums removes the part already included in
    // the dtm correction.
    const auto& energy_pt_psto_batch = mapreduce_sum<MathVector<double, 2>>(
        hc_sums, [&](const Det& det_a, const MathVector<double, 2>& hc_sum) {
          const double hc_sum_sq_diff = hc_sum[0] * hc_sum[0] - hc_sum[1] * hc_sum[1];
          const double H_aa = system.get_hamiltonian_elem(det_a, det_a, 0);
          const double contrib = hc_sum_sq_diff / (system.energy_var - H_aa);
          return contrib;
        });
    energy_sum += energy_pt_psto_batch[0];
    energy_sq_sum += energy_pt_psto_batch[1];
    // Extrapolate the full correction from the batches done so far.
    energy_pt_psto.value = energy_sum / (batch_id + 1) * n_batches;
    if (batch_id == n_batches - 1) {
      energy_pt_psto.uncert = 0.0;
    } else {
      const double energy_avg = energy_sum / n_pt_dets_sum;
      const double sample_stdev = sqrt(energy_sq_sum / n_pt_dets_sum - energy_avg * energy_avg);
      const double mean_stdev = sample_stdev / sqrt(n_pt_dets_sum);
      energy_pt_psto.uncert =
          mean_stdev * n_pt_dets_sum / (batch_id + 1) * (n_batches - batch_id - 1);
      // energy_pt_psto.uncert = sample_stdev * sqrt(n_pt_dets_sum) / (batch_id + 1) * n_batches;
    }
    if (Parallel::is_master()) {
      printf("PT psto batch correction: " ENERGY_FORMAT "\n", energy_pt_psto_batch[0]);
      printf("PT psto correction (eps1= %.2e):", eps_var);
      printf(" %s Ha\n", energy_pt_psto.to_string().c_str());
      printf("PT psto total energy (eps1= %.2e):", eps_var);
      printf(" %s Ha\n", (energy_pt_psto + energy_pt_dtm).to_string().c_str());
      printf("Correlation energy (eps1= %.2e):", eps_var);
      printf(" %s Ha\n",
             (energy_pt_psto + energy_pt_dtm - system.energy_hf).to_string().c_str());
    }
    hc_sums.clear();
    Timer::end();  // batch
    // Early exit once the uncertainty budget is met.
    if (energy_pt_psto.uncert <= target_error * 0.7) break;
    if (eps_pt_psto <= eps_pt && energy_pt_psto.uncert <= target_error) break;
  }
  Timer::end();  // psto
  return energy_pt_psto + energy_pt_dtm;
}

// Stochastic PT stage: samples variational dets with probability
// proportional to |c_i| and processes one random batch of PT dets per
// loop; iterates until the combined uncertainty meets target_error.
template <class S>
UncertResult Solver<S>::get_energy_pt_sto(
    const double eps_var, const UncertResult& energy_pt_psto) {
  if (eps_pt >= eps_pt_psto) return energy_pt_psto;
  const size_t max_pt_iterations = Config::get<size_t>("max_pt_iterations", 100);
  fgpl::DistHashMap<Det, MathVector<double, 3>, DetHasher> hc_sums;
  const size_t bytes_per_entry = bytes_per_det + 24;
  const size_t n_var_dets = system.get_n_dets();
  size_t n_batches = Config::get<size_t>("n_batches_pt_sto", 0);
  if (n_batches == 0) n_batches = 64;
  size_t n_samples = Config::get<size_t>("n_samples_pt_sto", 0);
  std::vector<double> probs(n_var_dets);
  std::vector<double> cum_probs(n_var_dets);  // For sampling.
  // sample_dets maps det index -> times drawn; sample_dets_list holds the
  // unique indices in draw order.
  std::unordered_map<size_t, unsigned> sample_dets;
  std::vector<size_t> sample_dets_list;
  size_t iteration = 0;
  const DetHasher det_hasher;
  UncertResult energy_pt_sto;
  std::vector<double> energy_pt_sto_loops;

  // Construct probs.
  double sum_weights = 0.0;
  for (size_t i = 0; i < n_var_dets; i++) {
    sum_weights += std::abs(system.coefs[i]);
  }
  for (size_t i = 0; i < n_var_dets; i++) {
    probs[i] = std::abs(system.coefs[i]) / sum_weights;
    cum_probs[i] = probs[i];
    if (i > 0) cum_probs[i] += cum_probs[i - 1];
  }
  const unsigned random_seed = Config::get<unsigned>("random_seed", time(nullptr));
  srand(random_seed);
  Timer::start(Util::str_printf("sto %#.2e", eps_pt));

  // Estimate best n sample.
  // Pick n_samples so that one batch of PT dets fits the memory budget:
  // probe with 1000 draws, count PT dets in ~1/128 of the space, then
  // redraw until the estimated unique-sample target is reached.
  if (n_samples == 0) {
    for (size_t i = 0; i < 1000; i++) {
      // Inverse-CDF sampling of a variational det index.
      const double rand_01 = (static_cast<double>(rand()) / (RAND_MAX));
      const int sample_det_id =
          std::lower_bound(cum_probs.begin(), cum_probs.end(), rand_01) - cum_probs.begin();
      if (sample_dets.count(sample_det_id) == 0) sample_dets_list.push_back(sample_det_id);
      sample_dets[sample_det_id]++;
    }
    // Keep all ranks on the same sample.
    fgpl::broadcast(sample_dets);
    fgpl::broadcast(sample_dets_list);
    size_t n_unique_samples = sample_dets_list.size();
    fgpl::DistRange<size_t>(0, n_unique_samples).for_each([&](const size_t sample_id) {
      const size_t i = sample_dets_list[sample_id];
      const Det& det = system.dets[i];
      const double coef = system.coefs[i];
      const auto& pt_det_handler = [&](const Det& det_a, const int n_excite) {
        if (var_dets.has(det_a)) return;
        const size_t det_a_hash = det_hasher(det_a);
        const size_t batch_hash = Util::rehash(det_a_hash);
        if ((batch_hash & 127) != 0) return;  // keep ~1/128 of PT dets for the estimate.
        if (n_excite == 1) {
          const double h_ai = system.get_hamiltonian_elem(det, det_a, n_excite);
          const double hc = h_ai * coef;
          if (std::abs(hc) < eps_pt) return;  // Filter out small single excitation.
        }
        MathVector<double, 3> contrib;
        hc_sums.async_set(det_a, contrib);
      };
      system.find_connected_dets(det, Util::INF, eps_pt / std::abs(coef), pt_det_handler);
    });
    hc_sums.sync();
    const size_t n_pt_dets = hc_sums.get_n_keys();
    hc_sums.clear();
    const size_t n_pt_dets_batch = n_pt_dets * 128 / n_batches;
    double default_mem_usage = 0.4;
    if (system.type == SystemType::HEG) default_mem_usage = 1.0;
    const double mem_usage = Config::get<double>("pt_sto_mem_usage", default_mem_usage);
    size_t n_unique_target =
        pt_mem_avail * mem_usage * n_unique_samples / bytes_per_entry / 5.0 / n_pt_dets_batch;
    const size_t max_unique_targets = n_var_dets / 8 + 1;
    if (n_unique_target >= max_unique_targets) n_unique_target = max_unique_targets;
    // Redraw until the unique-sample target is reached, counting how many
    // total draws that takes.
    sample_dets.clear();
    sample_dets_list.clear();
    n_samples = 0;
    n_unique_samples = 0;
    while (n_unique_samples < n_unique_target) {
      const double rand_01 = (static_cast<double>(rand()) / (RAND_MAX));
      const int sample_det_id =
          std::lower_bound(cum_probs.begin(), cum_probs.end(), rand_01) - cum_probs.begin();
      if (sample_dets.count(sample_det_id) == 0) {
        n_unique_samples++;
      }
      n_samples++;
      sample_dets[sample_det_id]++;
    }
    sample_dets.clear();
    fgpl::broadcast(n_samples);
    if (Parallel::is_master()) {
      printf("Number of samples chosen: %'zu\n", n_samples);
    }
    Timer::checkpoint("determine n samples");
  }

  while (iteration < max_pt_iterations) {
    Timer::start(Util::str_printf("#%zu", iteration + 1));

    // Generate random sample
    for (size_t i = 0; i < n_samples; i++) {
      const double rand_01 = (static_cast<double>(rand()) / (RAND_MAX));
      const int sample_det_id =
          std::lower_bound(cum_probs.begin(), cum_probs.end(), rand_01) - cum_probs.begin();
      if (sample_dets.count(sample_det_id) == 0) sample_dets_list.push_back(sample_det_id);
      sample_dets[sample_det_id]++;
    }
    fgpl::broadcast(sample_dets);
    fgpl::broadcast(sample_dets_list);
    if (Parallel::is_master()) {
      printf(
          "Number of unique variational determinants in sample: %'zu\n", sample_dets_list.size());
    }

    // Select random batch.
    size_t batch_id = rand() % n_batches;
    fgpl::broadcast(batch_id);
    const size_t n_unique_samples = sample_dets_list.size();
    if (Parallel::is_master()) printf("Batch id: %zu / %zu\n", batch_id, n_batches);
    for (size_t j = 0; j < 5; j++) {
      fgpl::DistRange<size_t>(j, n_unique_samples, 5).for_each([&](const size_t sample_id) {
        const size_t i = sample_dets_list[sample_id];
        const double count = static_cast<double>(sample_dets[i]);  // draw multiplicity
        const Det& det = system.dets[i];
        const double coef = system.coefs[i];
        const double prob = probs[i];
        const auto& pt_det_handler = [&](const Det& det_a, const int n_excite) {
          const size_t det_a_hash = det_hasher(det_a);
          const size_t batch_hash = Util::rehash(det_a_hash);
          if (batch_hash % n_batches != batch_id) return;
          if (var_dets.has(det_a)) return;
          const double h_ai = system.get_hamiltonian_elem(det, det_a, n_excite);
          const double hc = h_ai * coef;
          if (std::abs(hc) < eps_pt) return;  // Filter out small single excitation.
          const double factor = static_cast<double>(n_batches) / (n_samples * (n_samples - 1));
          // contrib[0]: weighted hc sum; contrib[1]: part already covered by
          // the psto stage; contrib[2]: diagonal (same-sample) correction.
          MathVector<double, 3> contrib;
          contrib[0] = count * hc / prob * sqrt(factor);
          if (std::abs(hc) < eps_pt_psto) {
            contrib[2] = (n_samples - 1 - count / prob) * hc * hc * factor * count / prob;
          } else {
            contrib[1] = contrib[0];
          }
          hc_sums.async_set(det_a, contrib, fgpl::Reducer<MathVector<double, 3>>::sum);
        };
        system.find_connected_dets(det, Util::INF, eps_pt / std::abs(coef), pt_det_handler);
      });
      hc_sums.sync(fgpl::Reducer<MathVector<double, 3>>::sum);
      if (Parallel::is_master()) printf("%zu%% ", (j + 1) * 20);
    }
    const size_t n_pt_dets = hc_sums.get_n_keys();
    if (Parallel::is_master()) printf("\nNumber of sto pt dets: %'zu\n", n_pt_dets);
    sample_dets.clear();
    sample_dets_list.clear();
    Timer::checkpoint("create hc sums");
    const double energy_pt_sto_loop = mapreduce_sum<MathVector<double, 3>>(
        hc_sums, [&](const Det& det_a, const MathVector<double, 3>& hc_sum) {
          const double h_aa = system.get_hamiltonian_elem(det_a, det_a, 0);
          const double factor = 1.0 / (system.energy_var - h_aa);
          return (hc_sum[0] * hc_sum[0] - hc_sum[1] * hc_sum[1] + hc_sum[2]) * factor;
        })[0];
    energy_pt_sto_loops.push_back(energy_pt_sto_loop);
    // Running mean and standard error over the completed loops.
    energy_pt_sto.value = Util::avg(energy_pt_sto_loops);
    energy_pt_sto.uncert = Util::stdev(energy_pt_sto_loops) / sqrt(iteration + 1.0);
    if (Parallel::is_master()) {
      printf("PT sto loop correction: " ENERGY_FORMAT "\n", energy_pt_sto_loop);
      printf("PT sto correction (eps1= %.2e):", eps_var);
      printf(" %s Ha\n", energy_pt_sto.to_string().c_str());
      printf("PT sto total energy (eps1= %.2e):", eps_var);
      printf(" %s Ha\n", (energy_pt_sto + energy_pt_psto).to_string().c_str());
      printf("Correlation energy (eps1= %.2e):", eps_var);
      printf(" %s Ha\n", (energy_pt_sto + energy_pt_psto - system.energy_hf).to_string().c_str());
    }
    hc_sums.clear();
    Timer::end();
    iteration++;
    // Stop once enough loops have run and the uncertainty is acceptable.
    if (iteration >= 6 && energy_pt_sto.uncert <= target_error * 0.7) {
      break;
    }
    if (iteration >= 10 && (energy_pt_sto + energy_pt_psto).uncert <= target_error) {
      break;
    }
  }
  hc_sums.clear_and_shrink();
  Timer::end();
  return energy_pt_sto + energy_pt_psto;
}

// Thread-local accumulation over the map entries, then a single
// MPI_Allreduce combines {sum, sum of squares} across ranks.
template <class S>
template <class C>
std::array<double, 2> Solver<S>::mapreduce_sum(
    const fgpl::DistHashMap<Det, C, DetHasher>& map,
    const std::function<double(const Det& det, const C& hc_sum)>& mapper) const {
  const int n_threads = omp_get_max_threads();
  std::vector<double> res_sq_thread(n_threads, 0.0);
  std::vector<double> res_thread(n_threads, 0.0);
  map.for_each([&](const Det& key, const size_t, const C& value) {
    const int thread_id = omp_get_thread_num();
    const double mapped = mapper(key, value);
    res_thread[thread_id] += mapped;
    res_sq_thread[thread_id] += mapped * mapped;
  });
  std::array<double, 2> res_local = {0.0, 0.0};
  std::array<double, 2> res;
  for (int i = 0; i < n_threads; i++) {
    res_local[0] += res_thread[i];
    res_local[1] += res_sq_thread[i];
  }
  MPI_Allreduce(&res_local, &res, 2, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  return res;
}

template <class S>
bool
// Reads a serialized wavefunction via collective MPI I/O (every rank calls
// MPI_File_read_all on the same data) and deserializes it into `system`.
// Returns false if the file cannot be opened.
Solver<S>::load_variation_result(const std::string& filename) {
  std::string serialized;
  const int TRUNK_SIZE = 1 << 20;  // 1 MiB read chunk (stack buffer).
  char buffer[TRUNK_SIZE];
  MPI_File file;
  int error;
  // BUGFIX: the access mode was `MPI_MODE_RDONLY | MPI_MODE_RDONLY` — the
  // same flag OR'ed with itself (a no-op typo). A single flag suffices.
  error = MPI_File_open(
      MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file);
  if (error) return false;
  MPI_Offset size;
  MPI_File_get_size(file, &size);
  MPI_Status status;
  // Chunked collective read: full chunks first, then the remainder.
  while (size > TRUNK_SIZE) {
    MPI_File_read_all(file, buffer, TRUNK_SIZE, MPI_CHAR, &status);
    serialized.append(buffer, TRUNK_SIZE);
    size -= TRUNK_SIZE;
  }
  MPI_File_read_all(file, buffer, size, MPI_CHAR, &status);
  serialized.append(buffer, size);
  MPI_File_close(&file);
  hps::from_string(serialized, system);
  if (Parallel::is_master()) {
    printf("Loaded %'zu dets from: %s\n", system.get_n_dets(), filename.c_str());
    print_dets_info();
    printf("HF energy: " ENERGY_FORMAT "\n", system.energy_hf);
    printf("Variational energy: " ENERGY_FORMAT "\n", system.energy_var);
  }
  return true;
}

// Serializes the system to disk; only the master rank writes.
template <class S>
void Solver<S>::save_variation_result(const std::string& filename) {
  if (Parallel::is_master()) {
    std::ofstream file(filename, std::ofstream::binary);
    hps::to_stream(system, file);
    printf("Variational results saved to: %s\n", filename.c_str());
  }
}

// Accumulates per-orbital-pair contributions to the correlation energy
// from singly/doubly excited dets and writes them to a CSV file.
template <class S>
void Solver<S>::save_pair_contrib(const double eps_var) {
  const auto& det_hf = system.dets[0];
  // const size_t n_elecs = system.n_elecs;
  const size_t n_up = system.n_up;
  // const size_t n_dn = system.n_dn;
  if (det_hf.up != det_hf.dn) {
    throw std::invalid_argument("non sym det_hf not implemented");
  }
  std::vector<std::vector<double>> contribs(n_up);
  std::string contrib_filename = Util::str_printf("pair_contrib_%#.2e.csv", eps_var);
  const auto& contrib_entry = Util::str_printf("pair_contrib/%#.2e", eps_var);
  Result::put<std::string>(contrib_entry, contrib_filename);
  std::ofstream contrib_file(contrib_filename);
  contrib_file << "i,j,pair_contrib" << std::endl;
  for (size_t i = 0; i < n_up; i++) {
    contribs[i].assign(n_up, 0.0);
  }
  const double c0 =
      system.coefs[0];
  // Map each excited det to the orbital pair (i, j) it excites out of and
  // accumulate its weighted Hamiltonian contribution.
  for (size_t det_id = 1; det_id < system.dets.size(); det_id++) {
    const auto& det = system.dets[det_id];
    const double coef = system.coefs[det_id];
    const auto& diff_up = det_hf.up.diff(det.up);
    const auto& diff_dn = det_hf.dn.diff(det.dn);
    const unsigned n_excite = diff_up.n_diffs + diff_dn.n_diffs;
    if (n_excite > 2) continue;  // Only singles and doubles contribute.
    size_t i = 0;
    size_t j = 0;
    const auto& H = system.get_hamiltonian_elem(det_hf, det, -1);
    // Pick the source orbital pair, ordered so that i <= j.
    if (diff_up.n_diffs == 2) {
      i = diff_up.left_only[0];
      j = diff_up.left_only[1];
    } else if (diff_up.n_diffs == 1) {
      i = diff_up.left_only[0];
      if (diff_dn.n_diffs == 1) {
        j = diff_dn.left_only[0];
        if (j < i) {
          std::swap(i, j);
        }
      } else {
        j = i;  // Single excitation: diagonal entry.
      }
    } else {
      i = diff_dn.left_only[0];
      if (diff_dn.n_diffs == 2) {
        j = diff_dn.left_only[1];
        if (j < i) {
          std::swap(i, j);
        }
      } else {
        j = i;  // Single excitation: diagonal entry.
      }
    }
    if (det.up == det.dn) {
      contribs[i][j] += H * coef / c0;
    } else if (system.time_sym) {
      // Time-sym packed dets represent two dets; weight by sqrt(2).
      contribs[i][j] += H * coef / c0 * Util::SQRT2;
    } else {
      contribs[i][j] += H * coef / c0;
    }
  }
  contrib_file.precision(15);
  // Write the upper triangle; off-diagonal pairs were double-counted.
  for (size_t i = 0; i < n_up; i++) {
    for (size_t j = i; j < n_up; j++) {
      if (i != j) {
        contribs[i][j] /= 2;
      }
      contrib_file << i << "," << j << "," << contribs[i][j] << std::endl;
    }
  }
  contrib_file.close();
}

// Prints diagnostics about the current wavefunction: effective det count
// under time symmetry, det counts and weights per excitation level,
// orbital occupations, and the largest-coefficient dets.
template <class S>
void Solver<S>::print_dets_info() const {
  if (system.time_sym) {
    // Print effective dets for unpacked time sym.
    size_t n_eff_dets = 0;
    for (const auto& det : system.dets) {
      if (det.up == det.dn) {
        n_eff_dets += 1;
      } else if (det.up < det.dn) {
        n_eff_dets += 2;  // Packed det stands for both spin orderings.
      } else {
        throw std::runtime_error("wf has unvalid det for time sym");
      }
    }
    printf("Effect dets (without time sym): %'zu\n", n_eff_dets);
  }

  // Print excitations.
  std::unordered_map<unsigned, size_t> excitations;
  std::unordered_map<unsigned, double> weights;
  unsigned highest_excitation = 0;
  const auto& det_hf = system.dets[0];
  for (size_t i = 0; i < system.dets.size(); i++) {
    const auto& det = system.dets[i];
    const double coef = system.coefs[i];
    const unsigned n_excite = det_hf.up.n_diffs(det.up) + det_hf.dn.n_diffs(det.dn);
    if (det.up != det.dn && system.time_sym) {
      excitations[n_excite] += 2;
    } else {
      excitations[n_excite] += 1;
    }
    weights[n_excite] += coef * coef;
    if (highest_excitation < n_excite) highest_excitation = n_excite;
  }
  printf("----------------------------------------\n");
  printf("%-10s%12s%16s\n", "Excite Lv", "# dets", "Sum c^2");
  for (unsigned i = 0; i <= highest_excitation; i++) {
    if (excitations.count(i) == 0) {
      excitations[i] = 0;
      weights[i] = 0.0;
    }
    printf("%-10u%12zu%16.8f\n", i, excitations[i], weights[i]);
  }

  // Print orb occupations.
  std::vector<double> orb_occupations(system.n_orbs, 0.0);
#pragma omp parallel for schedule(static, 1)
  for (unsigned j = 0; j < system.n_orbs; j++) {
    for (size_t i = 0; i < system.dets.size(); i++) {
      const auto& det = system.dets[i];
      const double coef = system.coefs[i];
      if (det.up.has(j)) {
        orb_occupations[j] += coef * coef;
      }
      if (det.dn.has(j)) {
        orb_occupations[j] += coef * coef;
      }
    }
  }
  printf("----------------------------------------\n");
  printf("%-10s%12s%16s\n", "Orbital", "", "Sum c^2");
  // Only the first 50 orbitals are listed.
  for (unsigned j = 0; j < system.n_orbs && j < 50; j++) {
    printf("%-10u%12s%16.8f\n", j, "", orb_occupations[j]);
  }
  double sum_orb_occupation =
      std::accumulate(orb_occupations.begin(), orb_occupations.end(), 0.0);
  printf("Sum orbitals c^2: %.8f\n", sum_orb_occupation);

  // Print most important dets.
  printf("----------------------------------------\n");
  printf("Most important dets:\n");
  std::vector<size_t> det_order(system.dets.size());
  for (size_t i = 0; i < system.dets.size(); i++) {
    det_order[i] = i;
  }
  // Order by |coef| descending; ties broken by smaller index first.
  const auto& comp = [&](const size_t a, const size_t b) {
    if (std::abs(system.coefs[a]) != std::abs(system.coefs[b])) {
      return std::abs(system.coefs[a]) < std::abs(system.coefs[b]);
    }
    return a > b;
  };
  std::priority_queue<size_t, std::vector<size_t>, decltype(comp)> det_ordered(comp, det_order);
  printf("%-10s%12s %-12s\n", "Excite Lv", "Coef", "Det (Reordered orb)");
  for (size_t i = 0; i < std::min((size_t)20, system.dets.size()); i++) {
    size_t ordered_i = det_ordered.top();
    det_ordered.pop();
    const double coef = system.coefs[ordered_i];
    const auto& det = system.dets[ordered_i];
    const auto& occs_up = det.up.get_occupied_orbs();
    const auto& occs_dn = det.dn.get_occupied_orbs();
    const unsigned n_excite = det_hf.up.n_diffs(det.up) + det_hf.dn.n_diffs(det.dn);
    printf("%-10u%12.8f", n_excite, coef);
    printf(" | ");
    for (unsigned j = 0; j < system.n_up; j++) {
      printf("%2u ", occs_up[j]);
    }
    printf("| ");
    for (unsigned j = 0; j < system.n_dn; j++) {
      printf("%2u ", occs_dn[j]);
    }
    printf("|\n");
  }
  printf("----------------------------------------\n");
}

// Canonical on-disk filename for the wavefunction at a given eps_var.
template <class S>
std::string Solver<S>::get_wf_filename(const double eps_var) const {
  return Util::str_printf("wf_eps1_%#.2e.dat", eps_var);
}
XSHA512_fmt_plug.c
/*
 * This file is part of John the Ripper password cracker,
 * Copyright (c) 2008,2011 by Solar Designer
 */

/*
 * Mac OS X 10.7 ("Lion") salted SHA-512 format. The on-disk hash is 136 hex
 * chars: 8 hex chars (4 bytes) of salt followed by 128 hex chars (64 bytes)
 * of SHA512(salt . password). Hashes are tagged "$LION$" internally.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_XSHA512;
#elif FMT_REGISTERS_H
john_register_one(&fmt_XSHA512);
#else

#include "sha2.h"

#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "memdbg.h"

#define FORMAT_LABEL "xsha512"
#define FORMAT_NAME "Mac OS X 10.7"

#if ARCH_BITS >= 64
#define ALGORITHM_NAME "SHA512 64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME "SHA512 32/" ARCH_BITS_STR " " SHA2_LIB
#endif

#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0

#define PLAINTEXT_LENGTH 107
#define CIPHERTEXT_LENGTH 136 /* 8 hex chars salt + 128 hex chars digest */

#define BINARY_SIZE 64 /* raw SHA-512 digest */
#define BINARY_ALIGN 4
#define SALT_SIZE 4
#define SALT_ALIGN sizeof(ARCH_WORD_32)

#define MIN_KEYS_PER_CRYPT 1
#ifdef _OPENMP
#define MAX_KEYS_PER_CRYPT (0x200 * 3)
#else
#define MAX_KEYS_PER_CRYPT 0x100
#endif

#if ARCH_BITS >= 64 || defined(__SSE2__)
/* 64-bitness happens to correlate with faster memcpy() */
#define PRECOMPUTE_CTX_FOR_SALT
#else
#undef PRECOMPUTE_CTX_FOR_SALT
#endif

static struct fmt_tests tests[] = {
	{"bb0489df7b073e715f19f83fd52d08ede24243554450f7159dd65c100298a5820525b55320f48182491b72b4c4ba50d7b0e281c1d98e06591a5e9c6167f42a742f0359c7", "password"},
	{"$LION$74911f723bd2f66a3255e0af4b85c639776d510b63f0b939c432ab6e082286c47586f19b4e2f3aab74229ae124ccb11e916a7a1c9b29c64bd6b0fd6cbd22e7b1f0ba1673", "hello"},
	{"$LION$5e3ab14c8bd0f210eddafbe3c57c0003147d376bf4caf75dbffa65d1891e39b82c383d19da392d3fcc64ea16bf8203b1fc3f2b14ab82c095141bb6643de507e18ebe7489", "boobies"},
	{NULL}
};

/* Per-candidate key buffers and computed digests, sized for one crypt batch */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int (*saved_key_length);
static ARCH_WORD_32 (*crypt_out)[16];

#ifdef PRECOMPUTE_CTX_FOR_SALT
/* SHA-512 context with the salt already absorbed, copied per candidate */
static SHA512_CTX ctx_salt;
#else
static ARCH_WORD_32 saved_salt;
#endif

/* Allocate the per-batch key/length/digest arrays. */
static void init(struct fmt_main *self)
{
	saved_key = mem_calloc_tiny(sizeof(*saved_key) * MAX_KEYS_PER_CRYPT, MEM_ALIGN_WORD);
	saved_key_length = mem_calloc_tiny(sizeof(*saved_key_length) * MAX_KEYS_PER_CRYPT, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * MAX_KEYS_PER_CRYPT, MEM_ALIGN_WORD);
}

/* Accept only "$LION$" followed by exactly 136 lowercase hex digits. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *pos;

	/* Require lowercase hex digits (assume ASCII) */
	pos = ciphertext;
	if (strncmp(pos, "$LION$", 6))
		return 0;
	pos += 6;
	/* the second condition rejects uppercase 'A'-'F' */
	while (atoi16[ARCH_INDEX(*pos)] != 0x7F && (*pos <= '9' || *pos >= 'a'))
		pos++;
	return !*pos && pos - ciphertext == CIPHERTEXT_LENGTH+6;
}

/* Normalize raw 136-hex-char input by prepending the "$LION$" tag. */
static char *prepare(char *split_fields[10], struct fmt_main *self)
{
	char Buf[200];
	if (!strncmp(split_fields[1], "$LION$", 6))
		return split_fields[1];
	if (split_fields[0] && strlen(split_fields[0]) == CIPHERTEXT_LENGTH) {
		sprintf(Buf, "$LION$%s", split_fields[0]);
		if (valid(Buf, self)) {
			char *cp = mem_alloc_tiny(CIPHERTEXT_LENGTH+7, MEM_ALIGN_NONE);
			strcpy(cp, Buf);
			return cp;
		}
	}
	if (strlen(split_fields[1]) == CIPHERTEXT_LENGTH) {
		sprintf(Buf, "$LION$%s", split_fields[1]);
		if (valid(Buf, self)) {
			char *cp = mem_alloc_tiny(CIPHERTEXT_LENGTH+7, MEM_ALIGN_NONE);
			strcpy(cp, Buf);
			return cp;
		}
	}
	return split_fields[1];
}

/* Decode the 64-byte digest: skip the 6-char tag and 8 salt hex chars. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD_32 dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	ciphertext += 6;
	p = ciphertext + 8;
	for (i = 0; i < sizeof(buf.c); i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

/* Decode the 4-byte salt: the first 8 hex chars after the tag. */
static void *salt(char *ciphertext)
{
	static union {
		unsigned char c[SALT_SIZE];
		ARCH_WORD_32 dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	ciphertext += 6;
	p = ciphertext;
	for (i = 0; i < sizeof(buf.c); i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

/* Hash-table probes over the first word of the computed digest. */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xF; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xFF; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xFFF; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xFFFF; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xFFFFF; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xFFFFFF; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7FFFFFF; }

static int salt_hash(void *salt)
{
	return *(ARCH_WORD_32 *)salt & (SALT_HASH_SIZE - 1);
}

/* Either pre-absorb the salt into a reusable context, or just remember it. */
static void set_salt(void *salt)
{
#ifdef PRECOMPUTE_CTX_FOR_SALT
	SHA512_Init(&ctx_salt);
	SHA512_Update(&ctx_salt, salt, SALT_SIZE);
#else
	saved_salt = *(ARCH_WORD_32 *)salt;
#endif
}

/* Store a candidate, truncated to PLAINTEXT_LENGTH (no NUL kept here). */
static void set_key(char *key, int index)
{
	int length = strlen(key);
	if (length > PLAINTEXT_LENGTH)
		length = PLAINTEXT_LENGTH;
	saved_key_length[index] = length;
	memcpy(saved_key[index], key, length);
}

/* NUL-terminate lazily on retrieval. */
static char *get_key(int index)
{
	saved_key[index][saved_key_length[index]] = 0;
	return saved_key[index];
}

/* Compute SHA512(salt . key) for every candidate in the batch. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int i;

#ifdef _OPENMP
#ifdef PRECOMPUTE_CTX_FOR_SALT
#pragma omp parallel for default(none) private(i) shared(ctx_salt, count, saved_key, saved_key_length, crypt_out)
#else
#pragma omp parallel for default(none) private(i) shared(saved_salt, count, saved_key, saved_key_length, crypt_out)
#endif
#endif
	for (i = 0; i < count; i++) {
		SHA512_CTX ctx;
#ifdef PRECOMPUTE_CTX_FOR_SALT
		/* resume from the salted context instead of re-hashing the salt */
		memcpy(&ctx, &ctx_salt, sizeof(ctx));
#else
		SHA512_Init(&ctx);
		SHA512_Update(&ctx, &saved_salt, SALT_SIZE);
#endif
		SHA512_Update(&ctx, saved_key[i], saved_key_length[i]);
		SHA512_Final((unsigned char *)(crypt_out[i]), &ctx);
	}
	return count;
}

/* Quick first-word filter, then full-digest compare. */
static int cmp_all(void *binary, int count)
{
	ARCH_WORD_32 b0 = *(ARCH_WORD_32 *)binary;
	int i;

	for (i = 0; i < count; i++) {
		if (b0 != crypt_out[i][0])
			continue;
		if (!memcmp(binary, crypt_out[i], BINARY_SIZE))
			return 1;
	}
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* Full binary is already compared in cmp_one(); nothing more to check. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

struct fmt_main fmt_XSHA512 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		prepare,
		valid,
		fmt_default_split,
		get_binary,
		salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
GB_unaryop__identity_int16_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__identity_int16_uint8
// op(A') function: GB_tran__identity_int16_uint8

// C type: int16_t
// A type: uint8_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: the cast does all the work)
#define GB_OP(z, x) \
    z = x ;

// casting (uint8_t -> int16_t widening is always lossless)
#define GB_CASTING(z, aij) \
    int16_t z = (int16_t) aij ;

// cij = op (cast (aij)); aij and z are block-local temporaries
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_int16_uint8
(
    int16_t *Cx, // Cx and Ax may be aliased
    uint8_t *Ax,
    int64_t anz, // number of entries to convert
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // embarrassingly parallel: each p touches only Cx [p] and Ax [p]
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__identity_int16_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template does the work using the macros above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
firstprivate-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

// use of firstprivate()

/*
 * Adds the scalar g to every element of a[0..n-1] in parallel.
 *
 * Race-free: firstprivate(g) gives every thread its own copy of g,
 * initialized from the value at loop entry, and each iteration writes a
 * distinct a[i]; the loop index i is implicitly private to the worksharing
 * loop. Hence no two threads touch the same memory location.
 */
void foo(int * a, int n, int g)
{
  int i;
#pragma omp parallel for firstprivate (g)
  for (i=0;i<n;i++)
  {
    a[i] = a[i]+g;
  }
}

int a[100];

/* Driver: add 7 to each of the 100 (zero-initialized) global array slots. */
int main()
{
  foo(a, 100, 7);
  return 0;
}
fmatrix.h
#ifndef _FMATRIX_H #define _FMATRIX_H CPS_START_NAMESPACE template<typename T> class basicMatrix{ T* tt; int rows, cols; int fsize; //number of elements void free(){ if(tt!=NULL) sfree("basicMatrix","~basicMatrix","free",tt); } void alloc(const int &_rows, const int &_cols, T const* cp = NULL){ if(_rows != rows || _cols != cols){ free(); rows = _rows; cols = _cols; fsize = rows*cols; tt = (T*)smalloc("basicMatrix", "basicMatrix", "alloc" , sizeof(T) * fsize); } if(cp != NULL) for(int i=0;i<fsize;i++) tt[i] = cp[i]; } public: basicMatrix(): rows(0),cols(0),fsize(0),tt(NULL){ } basicMatrix(const int &_rows, const int &_cols): tt(NULL){ alloc(_rows,_cols); } basicMatrix(const basicMatrix<T> &r){ alloc(r.rows,r.cols,r.tt); } T* ptr(){ return tt;} void resize(const int &_rows, const int &_cols){ alloc(_rows,_cols); } inline const T & operator()(const int &i, const int &j) const{ return tt[j + cols*i]; } inline T & operator()(const int &i, const int &j){ return tt[j + cols*i]; } inline const int &nRows() const{ return rows; } inline const int &nCols() const{ return cols; } ~basicMatrix(){ free(); } }; //A matrix of complex numbers and some useful associated methods template<typename mf_Complex> class fMatrix{ mf_Complex* tt; int rows, cols; int fsize; //number of elements void free_matrix(){ if(tt!=NULL) sfree("fMatrix","~fMatrix","free",tt); } void alloc_matrix(const int _rows, const int _cols, mf_Complex const* cp = NULL){ if(_rows != rows || _cols != cols){ free_matrix(); rows = _rows; cols = _cols; fsize = rows*cols; tt = (mf_Complex*)smalloc("fMatrix", "fMatrix", "alloc" , sizeof(mf_Complex) * fsize); } if(cp == NULL) zero(); else for(int i=0;i<fsize;i++) tt[i] = cp[i]; } public: fMatrix(): rows(0),cols(0),fsize(0),tt(NULL){ } fMatrix(const int &_rows, const int &_cols): rows(0), cols(0), fsize(0),tt(NULL){ alloc_matrix(_rows,_cols); } fMatrix(const fMatrix<mf_Complex> &r): rows(0), cols(0), fsize(0),tt(NULL){ alloc_matrix(r.rows,r.cols,r.tt); } mf_Complex *ptr(){ 
return tt;} void resize(const int _rows, const int _cols){ alloc_matrix(_rows,_cols); } void zero(){ for(int i=0;i<fsize;i++) tt[i] = 0.0; } fMatrix & operator*=(const mf_Complex &r){ for(int i=0;i<fsize;i++) tt[i] *= r; return *this; } fMatrix & operator*=(const typename mf_Complex::value_type &r){ for(int i=0;i<fsize*2;i++) ((typename mf_Complex::value_type*)tt)[i] *= r; return *this; } fMatrix & operator+=(const fMatrix<mf_Complex> &r){ for(int i=0;i<fsize;i++) tt[i] += r.tt[i]; return *this; } inline const mf_Complex & operator()(const int i, const int j) const{ return tt[j + cols*i]; } inline mf_Complex & operator()(const int i, const int j){ return tt[j + cols*i]; } inline const int nRows() const{ return rows; } inline const int nCols() const{ return cols; } void nodeSum(){ QMP_sum_array( (typename mf_Complex::value_type*)tt,2*fsize); } ~fMatrix(){ free_matrix(); } void write(const std::string &filename) const{ FILE *p; if((p = Fopen(filename.c_str(),"w")) == NULL) ERR.FileA("fMatrix","write",filename.c_str()); for(int r=0;r<rows;r++) for(int c=0;c<cols;c++) Fprintf(p,"%d %d %.16e %.16e\n",r,c, (*this)(r,c).real(), (*this)(r,c).imag()); Fclose(p); } }; //Rearrange an Lt*Lt matrix from ordering tsnk, tsrc to tsrc, tsep=tsnk-tsrc template<typename mf_Complex> void rearrangeTsrcTsep(fMatrix<mf_Complex> &m){ int Lt = GJP.Tnodes()*GJP.TnodeSites(); if(m.nRows()!=Lt || m.nCols()!=Lt) ERR.General("","rearrangeTsrcTsep(fMatrix<mf_Complex> &)","Expect an Lt*Lt matrix\n"); fMatrix<mf_Complex> tmp(m); for(int tsnk=0;tsnk<Lt;tsnk++){ for(int tsrc=0;tsrc<Lt;tsrc++){ int tsep = (tsnk-tsrc+Lt) % Lt; m(tsrc,tsep) = tmp(tsnk,tsrc); } } } //A vector of complex numbers and some useful associated methods template<typename mf_Complex> class fVector{ mf_Complex* tt; int fsize; void free_mem(){ if(tt!=NULL) sfree("fVector","~fVector","free",tt); } void alloc_mem(const int _elems, mf_Complex const* cp = NULL){ if(_elems != fsize){ free_mem(); fsize = _elems; tt = 
(mf_Complex*)smalloc("fVector", "fVector", "alloc" , sizeof(mf_Complex) * fsize); } if(cp == NULL) zero(); else for(int i=0;i<fsize;i++) tt[i] = cp[i]; } public: fVector(): fsize(0),tt(NULL){ } fVector(const int _elems): fsize(0),tt(NULL){ alloc_mem(_elems); } fVector(const fVector<mf_Complex> &r): fsize(0),tt(NULL){ alloc_mem(r.fsize,r.tt); } mf_Complex *ptr(){ return tt;} void resize(const int _elems){ alloc_mem(_elems); } void zero(){ for(int i=0;i<fsize;i++) tt[i] = mf_Complex(0,0); } fVector & operator*=(const mf_Complex &r){ for(int i=0;i<fsize;i++) tt[i] *= r; return *this; } fVector & operator*=(const typename mf_Complex::value_type &r){ for(int i=0;i<fsize*2;i++) ((typename mf_Complex::value_type*)tt)[i] *= r; return *this; } inline const mf_Complex & operator()(const int &i) const{ return tt[i]; } inline mf_Complex & operator()(const int &i){ return tt[i]; } inline const int &size() const{ return fsize; } void nodeSum(){ QMP_sum_array( (typename mf_Complex::value_type*)tt,2*fsize); } ~fVector(){ free_mem(); } void write(const std::string &filename) const{ FILE *p; if((p = Fopen(filename.c_str(),"w")) == NULL) ERR.FileA("fVector","write",filename.c_str()); for(int i=0;i<fsize;i++) Fprintf(p,"%d %.16e %.16e\n",i, tt[i].real(), tt[i].imag()); Fclose(p); } }; //Array of complex with optional threading template<typename mf_Complex, typename AllocPolicy = StandardAllocPolicy> class basicComplexArray: public AllocPolicy{ protected: int thread_size; //size of each thread unit int nthread; int size; //total size mf_Complex *con; public: basicComplexArray(): size(0), con(NULL){} basicComplexArray(const int &_thread_size, const int &_nthread = 1): size(0), con(NULL){ resize(_thread_size,_nthread); } void free_mem(){ if(con != NULL){ AllocPolicy::_free(con); con = NULL; } } void resize(const int &_thread_size, const int &_nthread = 1){ free_mem(); thread_size = _thread_size; nthread = _nthread; size = _thread_size * _nthread; this->_alloc(&con, 
size*sizeof(mf_Complex)); memset((void*)con, 0, size * sizeof(mf_Complex)); } ~basicComplexArray(){ free_mem(); } inline const mf_Complex & operator[](const int i) const{ return con[i]; } inline mf_Complex & operator[](const int i){ return con[i]; } inline mf_Complex & operator()(const int i, const int thread){ return con[i + thread * thread_size]; } int nElementsTotal() const{ return size; } int nElementsPerThread() const{ return thread_size; } int nThreads() const{ return nthread; } //Sum (reduce) over all threads void threadSum(){ if(nthread == 1) return; basicComplexArray<mf_Complex,AllocPolicy> tmp(thread_size,1); #pragma omp parallel for for(int i=0;i<thread_size;i++){ for(int t=0;t<nthread;t++) tmp.con[i] += con[i + t*thread_size]; } AllocPolicy::_free(con); con = tmp.con; nthread = 1; size = tmp.size; tmp.con = NULL; } void nodeSum(){ globalSumComplex(this->con,size); //QMP_sum_array( (typename mf_Complex::value_type*)con,2*size); } }; CPS_END_NAMESPACE #endif
convolution_packn.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Direct convolution for packn layout on RISC-V Vector: every channel stores
// groups of `packn` floats interleaved, and one vector register accumulates
// the packn output lanes of an output channel.
// Assumes top_blob is pre-sized to the output shape and weight_data_packn is
// pre-arranged to match the access pattern below -- TODO confirm with caller.
static void convolution_packn_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packn, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    // packn = number of 32-bit lanes in one vector register (vlenb bytes / 4).
    const int packn = csrr_vlenb() / 4;
    const word_type vl = vsetvl_e32m1(packn);

    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets
    // space_ofs[k] = element offset of kernel tap k inside the input window,
    // with dilation applied; `gap` advances to the next kernel row.
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const float* bias_data_ptr = bias_data;

    // One output channel per task; no shared writes across tasks.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // Accumulator starts at this channel's bias (or zero).
                vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);

                if (bias_data_ptr)
                {
                    _sum = vle32_v_f32m1(bias_data_ptr + p * packn, vl);
                }

                const float* kptr = (const float*)weight_data_packn.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const float* sptr = m.row(i * stride_h) + j * stride_w * packn;

                    for (int k = 0; k < maxk; k++) // 29.23
                    {
                        const float* slptr = sptr + space_ofs[k] * packn;

                        // Multiply each of the packn input lanes of this tap
                        // by its packn-wide weight vector and accumulate.
                        for (int l = 0; l < packn; l++)
                        {
                            float val = *slptr++;
                            vfloat32m1_t _w0 = vle32_v_f32m1(kptr, vl);
                            _sum = vfmacc_vf_f32m1(_sum, val, _w0, vl);

                            kptr += packn;
                        }
                    }
                }

                _sum = activation_ps(_sum, activation_type, activation_params, vl);

                vse32_v_f32m1(outptr + j * packn, _sum, vl);
            }

            outptr += outw * packn;
        }
    }
}
rawmd5u_fmt_plug.c
/* * Thick raw-md5-unicode (come-back :) * * This software is Copyright (c) 2011 magnum, and it is hereby released to the * general public under the following terms: Redistribution and use in source * and binary forms, with or without modification, are permitted. * */ #if FMT_EXTERNS_H extern struct fmt_main fmt_rawmd5uthick; #elif FMT_REGISTERS_H john_register_one(&fmt_rawmd5uthick); #else #include <string.h> #include "arch.h" #ifdef SIMD_COEF_32 #define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD5) #endif #include "simd-intrinsics.h" #include "md5.h" #include "misc.h" #include "common.h" #include "formats.h" #include "options.h" #include "unicode.h" #include "memory.h" #include "johnswap.h" #include "memdbg.h" #define FORMAT_LABEL "Raw-MD5u" #define FORMAT_NAME "" #define ALGORITHM_NAME "md5(utf16($p)) " MD5_ALGORITHM_NAME #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define CIPHERTEXT_LENGTH 32 #define BINARY_SIZE 16 #define BINARY_ALIGN 4 #define SALT_SIZE 0 #define SALT_ALIGN 1 #ifdef SIMD_COEF_32 #define BLOCK_LOOPS 1 #define PLAINTEXT_LENGTH 27 #define MIN_KEYS_PER_CRYPT NBKEYS #define MAX_KEYS_PER_CRYPT NBKEYS * BLOCK_LOOPS #define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32*4 ) #else #define PLAINTEXT_LENGTH 125 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #ifdef SIMD_COEF_32 static unsigned char (*saved_key); static unsigned char (*crypt_key); static unsigned int (**buf_ptr); #else static MD5_CTX ctx; static int saved_len; static UTF16 saved_key[PLAINTEXT_LENGTH + 1]; static uint32_t crypt_key[BINARY_SIZE / 4]; #endif /* Note some plaintexts will be replaced in init() if running UTF-8 */ static struct fmt_tests tests[] = { {"16c47151c18ac087cd12b3a70746c790", "test1"}, {"d41d8cd98f00b204e9800998ecf8427e", ""}, {"d41d8cd98f00b204e9800998ecf8427e", ""}, {"d41d8cd98f00b204e9800998ecf8427e", ""}, {"d41d8cd98f00b204e9800998ecf8427e", ""}, 
{"d41d8cd98f00b204e9800998ecf8427e", ""}, {"9c3abef89ff76f8acd80eae37b35f64f", "test2"}, {"849ee1b88b5d887bdb058180a666b450", "test3"}, {"8c4cb7e8b33b56a833cdaa8673f3b425", "test4"}, {"537e738b1ac5551f65106368dc301ece", "thatsworking"}, // repeat first hash in exactly the same form that is used in john.pot {"$dynamic_29$16c47151c18ac087cd12b3a70746c790", "test1"}, {NULL} }; static void set_key_utf8(char *_key, int index); static void set_key_CP(char *_key, int index); static void init(struct fmt_main *self) { #if SIMD_COEF_32 int i; #endif if (options.target_enc == UTF_8) { /* This avoids an if clause for every set_key */ self->methods.set_key = set_key_utf8; #if SIMD_COEF_32 /* kick it up from 27. We will truncate in setkey_utf8() */ self->params.plaintext_length = 3 * PLAINTEXT_LENGTH; #endif tests[1].ciphertext = "94a4e171de16580742c4d141e6607bf7"; tests[1].plaintext = "\xE2\x82\xAC"; // Euro sign tests[2].ciphertext = "03c60810f0e54d16e826aca385d776c8"; tests[2].plaintext = "\xE2\x82\xAC\xE2\x82\xAC"; // 2 x euro tests[3].ciphertext = "2d554433d7cde7ec8d16aaf126c3be6b"; tests[3].plaintext = "\xE2\x82\xAC\xC3\xBC"; // euro and u-umlaut tests[4].ciphertext = "8007d9070b27db7b30433df2cd10abc1"; tests[4].plaintext = "\xC3\xBC\xE2\x82\xAC"; // u-umlaut and euro } else { if (options.target_enc != ASCII && options.target_enc != ISO_8859_1) { /* This avoids an if clause for every set_key */ self->methods.set_key = set_key_CP; } if (CP_to_Unicode[0xfc] == 0x00fc) { tests[1].ciphertext = "ea7ab2b5c07650badab30790d0c9b63e"; tests[1].plaintext = "\xFC"; // German u-umlaut in iso-8859-1 tests[2].ciphertext = "f0a0b9f1dea0e458cec9a284ff434d44"; tests[2].plaintext = "\xFC\xFC"; tests[3].ciphertext = "d25a0b436b768777cc9a343d283dbf5a"; tests[3].plaintext = "\xFC\xFC\xFC"; tests[4].ciphertext = "719917322bf12168f8c55939e4fec8de"; tests[4].plaintext = "\xFC\xFC\xFC\xFC"; } } #if SIMD_COEF_32 saved_key = mem_calloc_align(sizeof(*saved_key), 64*self->params.max_keys_per_crypt, 
/*
 * John the Ripper "thick" raw-MD5-unicode format: MD5 over the UTF-16LE
 * encoding of the password ($dynamic_29$ style hashes).
 *
 * NOTE(review): the opening of init() lies above this chunk; the fragment
 * below completes it.  In the SIMD build, saved_key holds the keys
 * interleaved lane-wise (GETPOS maps (offset, index) into that layout),
 * crypt_key holds the interleaved digests, and buf_ptr[i] caches a pointer
 * to lane i's first word so set_key() avoids recomputing GETPOS.
 */
MEM_ALIGN_SIMD);
	crypt_key = mem_calloc_align(sizeof(*crypt_key),
	                             BINARY_SIZE*self->params.max_keys_per_crypt,
	                             MEM_ALIGN_SIMD);
	buf_ptr = mem_calloc_align(sizeof(*buf_ptr),
	                           self->params.max_keys_per_crypt,
	                           sizeof(*buf_ptr));
	/* Precompute one pointer per candidate into the interleaved key buffer. */
	for (i=0; i<self->params.max_keys_per_crypt; i++)
		buf_ptr[i] = (unsigned int*)&saved_key[GETPOS(0, i)];
#endif
}

/* Release the SIMD buffers allocated in init(). MEM_FREE also NULLs the
 * pointers, so a re-init after done() starts clean. */
static void done(void)
{
#ifdef SIMD_COEF_32
	MEM_FREE(buf_ptr);
	MEM_FREE(crypt_key);
	MEM_FREE(saved_key);
#endif
}

/*
 * Canonicalize a ciphertext: ensure the "$dynamic_29$" tag is present
 * exactly once and lowercase the 32 hex digits, so equal hashes compare
 * equal as strings (FMT_SPLIT_UNIFIES_CASE).
 */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[32+12+1]; /* 32 hex digits + 12-char tag + NUL */

	if (!strncmp(ciphertext, "$dynamic_29$", 12))
		ciphertext += 12;
	strcpy(out, "$dynamic_29$");
	memcpy(&out[12], ciphertext, 32);
	out[sizeof(out)-1] = 0;
	strlwr(&out[12]);
	return out;
}

/*
 * Accept a ciphertext iff (after an optional "$dynamic_29$" tag) it is
 * exactly CIPHERTEXT_LENGTH hex digits.  atoi16[] returns 0x7F for any
 * non-hex character, which also stops the scan at the terminating NUL.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *pos;

	if (!strncmp(ciphertext, "$dynamic_29$", 12))
		ciphertext += 12;
	for (pos = ciphertext; atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++);
	if (!*pos && pos - ciphertext == CIPHERTEXT_LENGTH)
		return 1;
	else
		return 0;
}

/*
 * Decode the 32 hex digits (past the 12-char tag) into the 16-byte binary
 * digest.  The nibble shifts (4,0,12,8,20,16,28,24) assemble each group of
 * 8 hex digits into one little-endian 32-bit word; on big-endian hosts the
 * word is byte-swapped afterwards.  Returns a pointer to a static buffer
 * (the union forces suitable alignment), valid until the next call.
 */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned long dummy;
		unsigned int i[BINARY_SIZE/sizeof(unsigned int)];
	} _out;
	unsigned int *out = _out.i;
	unsigned int i;
	unsigned int temp;

	ciphertext+=12;
	for (i=0; i<4; i++)
	{
		temp = ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+0])]))<<4;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+1])]));
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+2])]))<<12;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+3])]))<<8;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+4])]))<<20;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+5])]))<<16;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+6])]))<<28;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+7])]))<<24;
#if ARCH_LITTLE_ENDIAN
		out[i]=temp;
#else
		out[i]=JOHNSWAP(temp);
#endif
	}
	return out;
}

// ISO-8859-1 to UCS-2, directly into vector key buffer.
// SIMD path: packs two UCS-2 code units per 32-bit word, appends the MD5
// padding byte 0x80 after the key, zeroes any stale words left from a
// previous (longer) key, and stores the bit length (len << 4, i.e. len*16
// bits) into word 14 of this lane's MD5 block.
static void set_key(char *_key, int index)
{
#ifdef SIMD_COEF_32
	const unsigned char *key = (unsigned char*)_key;
	unsigned int *keybuf_word = buf_ptr[index];
	unsigned int len, temp2;

	len = 0;
	while((temp2 = *key++)) {
		unsigned int temp;
		if ((temp = *key++) && len < PLAINTEXT_LENGTH - 1)
		{
			/* two more characters: low half = first, high half = second */
			temp2 |= (temp << 16);
			*keybuf_word = temp2;
		}
		else
		{
			/* odd length (or limit hit): place 0x80 padding in high half */
			temp2 |= (0x80 << 16);
			*keybuf_word = temp2;
			len++;
			goto key_cleaning;
		}
		len += 2;
		keybuf_word += SIMD_COEF_32; /* next word of the SAME lane */
	}
	*keybuf_word = 0x80; /* even length: padding byte in its own word */

key_cleaning:
	/* zero trailing words left over from a previous, longer key */
	keybuf_word += SIMD_COEF_32;
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	/* MD5 length field (in bits): word 14 of this lane's 16-word block */
	((unsigned int *)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4;
#else
#if ARCH_LITTLE_ENDIAN
	/* scalar LE: widen each byte to a UTF-16LE code unit */
	UTF8 *s = (UTF8*)_key;
	UTF16 *d = saved_key;
	while (*s)
		*d++ = *s++;
	*d = 0;
	saved_len = (int)((char*)d - (char*)saved_key);
#else
	/* scalar BE: write the byte, then skip the (zero) high byte, so the
	 * buffer still holds UTF-16LE as MD5 input */
	UTF8 *s = (UTF8*)_key;
	UTF8 *d = (UTF8*)saved_key;
	while (*s) {
		*d++ = *s++;
		++d;
	}
	*d = 0;
	saved_len = (int)((char*)d - (char*)saved_key);
#endif
#endif
}

// Legacy codepage to UCS-2, directly into vector key buffer.
// Same packing scheme as set_key(), but each input byte is first mapped
// through the CP_to_Unicode[] table for the active --encoding.
// NOTE(review): presumably installed in place of set_key() by init() when a
// legacy codepage is selected — confirm against the part of the file above
// this chunk.
static void set_key_CP(char *_key, int index)
{
#ifdef SIMD_COEF_32
	const unsigned char *key = (unsigned char*)_key;
	unsigned int *keybuf_word = buf_ptr[index];
	unsigned int len, temp2;

	len = 0;
	while((temp2 = *key++)) {
		unsigned int temp;
		temp2 = CP_to_Unicode[temp2];
		if ((temp = *key++) && len < PLAINTEXT_LENGTH - 1)
		{
			temp = CP_to_Unicode[temp];
			temp2 |= (temp << 16);
			*keybuf_word = temp2;
		} else {
			temp2 |= (0x80 << 16);
			*keybuf_word = temp2;
			len++;
			goto key_cleaning_enc;
		}
		len += 2;
		keybuf_word += SIMD_COEF_32;
	}
	*keybuf_word = 0x80;

key_cleaning_enc:
	keybuf_word += SIMD_COEF_32;
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	((unsigned int *)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4;
#else
	saved_len = enc_to_utf16((UTF16*)&saved_key, PLAINTEXT_LENGTH + 1,
	                         (unsigned char*)_key, strlen(_key)) << 1;
	if (saved_len < 0)
		saved_len = strlen16(saved_key);
#endif
}

// UTF-8 to UCS-2, directly into vector key buffer.
// Decodes up to two code points per iteration (chl = low half, chh = high
// half of the packed word).  Code points above the BMP are split into a
// UTF-16 surrogate pair.  Truncated/invalid sequences jump to bailout.
// The switch cases deliberately fall through (3 -> 2 -> 1 -> 0), consuming
// one continuation byte per case.
static void set_key_utf8(char *_key, int index)
{
#ifdef SIMD_COEF_32
	const UTF8 *source = (UTF8*)_key;
	unsigned int *keybuf_word = buf_ptr[index];
	UTF32 chl, chh = 0x80;
	unsigned int len = 0;

	while (*source) {
		chl = *source;
		if (chl >= 0xC0) { /* lead byte of a multi-byte sequence */
			unsigned int extraBytesToRead = opt_trailingBytesUTF8[chl & 0x3f];
			switch (extraBytesToRead) {
			case 3:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
				/* fallthrough */
			case 2:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
				/* fallthrough */
			case 1:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
				/* fallthrough */
			case 0:
				break;
			default:
				goto bailout;
			}
			chl -= offsetsFromUTF8[extraBytesToRead];
		}
		source++;
		len++;
		if (chl > UNI_MAX_BMP) {
			/* supplementary plane: encode as a surrogate pair */
			if (len == PLAINTEXT_LENGTH) {
				/* no room for both halves: pad and stop */
				chh = 0x80;
				*keybuf_word = (chh << 16) | chl;
				keybuf_word += SIMD_COEF_32;
				break;
			}
#define halfBase 0x0010000UL
#define halfShift 10
#define halfMask 0x3FFUL
#define UNI_SUR_HIGH_START (UTF32)0xD800
#define UNI_SUR_LOW_START (UTF32)0xDC00
			chl -= halfBase;
			chh = (UTF16)((chl & halfMask) + UNI_SUR_LOW_START);;
			chl = (UTF16)((chl >> halfShift) + UNI_SUR_HIGH_START);
			len++;
		} else if (*source && len < PLAINTEXT_LENGTH) {
			/* decode a second code point into the high half */
			chh = *source;
			if (chh >= 0xC0) {
				unsigned int extraBytesToRead = opt_trailingBytesUTF8[chh & 0x3f];
				switch (extraBytesToRead) {
				case 3:
					/* NOTE(review): this case updates chl, not chh,
					 * unlike cases 2 and 1 below — looks like a
					 * copy-paste slip for 4-byte sequences in the
					 * second slot; confirm against upstream before
					 * changing. */
					++source;
					if (*source) {
						chl <<= 6;
						chl += *source;
					} else
						goto bailout;
					/* fallthrough */
				case 2:
					++source;
					if (*source) {
						chh <<= 6;
						chh += *source;
					} else
						goto bailout;
					/* fallthrough */
				case 1:
					++source;
					if (*source) {
						chh <<= 6;
						chh += *source;
					} else
						goto bailout;
					/* fallthrough */
				case 0:
					break;
				default:
					goto bailout;
				}
				chh -= offsetsFromUTF8[extraBytesToRead];
			}
			source++;
			len++;
		} else {
			/* end of input (or limit): pad the high half and stop */
			chh = 0x80;
			*keybuf_word = (chh << 16) | chl;
			keybuf_word += SIMD_COEF_32;
			break;
		}
		*keybuf_word = (chh << 16) | chl;
		keybuf_word += SIMD_COEF_32;
	}
	/* if the loop ended without writing padding, add a padding word */
	if (chh != 0x80 || len == 0) {
		*keybuf_word = 0x80;
		keybuf_word += SIMD_COEF_32;
	}

bailout:
	/* zero trailing words left over from a previous, longer key */
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	/* MD5 bit-length field, word 14 of this lane's block */
	((unsigned int *)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4;
#else
	saved_len = utf8_to_utf16((UTF16*)&saved_key, PLAINTEXT_LENGTH + 1,
	                          (unsigned char*)_key, strlen(_key)) << 1;
	if (saved_len < 0)
		saved_len = strlen16(saved_key);
#endif
}

/* Reconstruct the plaintext (for display) from the interleaved UCS-2 key
 * buffer, stopping at the 0x80 padding marker, and convert it back to the
 * session encoding. */
static char *get_key(int index)
{
#ifdef SIMD_COEF_32
	// Get the key back from the key buffer, from UCS-2
	unsigned int *keybuffer = (unsigned int*)&saved_key[GETPOS(0, index)];
	static UTF16 key[PLAINTEXT_LENGTH + 1 + 1]; // if only +1 we 'can' overflow. Not sure why, but ASan found it.
	unsigned int md5_size=0;
	unsigned int i=0;

	for (; md5_size < PLAINTEXT_LENGTH; i += SIMD_COEF_32, md5_size++)
	{
		/* unpack the two UCS-2 units stored in this 32-bit word */
		key[md5_size] = keybuffer[i];
		key[md5_size+1] = keybuffer[i] >> 16;
		/* 0x80 followed by zero is the padding marker, not a character */
		if (key[md5_size] == 0x80 && key[md5_size+1] == 0) {
			key[md5_size] = 0;
			break;
		}
		++md5_size;
		if (key[md5_size] == 0x80 &&
		    ((keybuffer[i+SIMD_COEF_32]&0xFFFF) == 0 || md5_size == PLAINTEXT_LENGTH)) {
			key[md5_size] = 0;
			break;
		}
	}
	return (char*)utf16_to_enc(key);
#else
	return (char*)utf16_to_enc(saved_key);
#endif
}

/* Quick scan: does ANY computed digest match the candidate binary's first
 * 32-bit word?  A full per-index comparison is done later in cmp_one(). */
static int cmp_all(void *binary, int count)
{
#ifdef SIMD_COEF_32
	unsigned int x,y=0;

	for (;y<SIMD_PARA_MD5*BLOCK_LOOPS;y++)
		for (x=0;x<SIMD_COEF_32;x++)
		{
			/* digests are interleaved: word w of lane x in group y lives at
			 * crypt_key[x + y*SIMD_COEF_32*4 + w*SIMD_COEF_32] */
			if ( ((uint32_t*)binary)[0] == ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4] )
				return 1;
		}
	return 0;
#else
	return !memcmp(binary, crypt_key, BINARY_SIZE);
#endif
}

/* cmp_one() already compares the full 16-byte digest, so nothing further
 * to verify here. */
static int cmp_exact(char *source, int index)
{
	return (1);
}

/* Compare all four digest words of lane `index` against the candidate. */
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
	unsigned int x,y;
	x = index&(SIMD_COEF_32-1);
	y = (unsigned int)index/SIMD_COEF_32;

	if ( ((uint32_t*)binary)[0] != ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4] )
		return 0;
	if ( ((uint32_t*)binary)[1] != ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4+SIMD_COEF_32] )
		return 0;
	if ( ((uint32_t*)binary)[2] != ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4+2*SIMD_COEF_32] )
		return 0;
	if ( ((uint32_t*)binary)[3] != ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4+3*SIMD_COEF_32] )
		return 0;
	return 1;
#else
	return !memcmp(binary, crypt_key, BINARY_SIZE);
#endif
}

/* Hash all buffered keys: one SIMD MD5 pass over the pre-built blocks
 * (keys were padded and length-stamped by set_key*), or a scalar MD5 of
 * the single UTF-16 key in the non-SIMD build. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
#if defined(SIMD_COEF_32)
#if (BLOCK_LOOPS > 1)
	int i;

	// This was an experiment. It's not used (unless you bump BLOCK_LOOPS),
	// cause it does not scale well. We would need to parallelize set_key()
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (i = 0; i < BLOCK_LOOPS; i++)
		SIMDmd5body(&saved_key[i*NBKEYS*64],
		            (unsigned int*)&crypt_key[i*NBKEYS*BINARY_SIZE],
		            NULL, SSEi_MIXED_IN);
#else
	SIMDmd5body(saved_key, (unsigned int*)crypt_key, NULL, SSEi_MIXED_IN);
#endif
#else
	MD5_Init( &ctx );
	MD5_Update(&ctx, (unsigned char*)saved_key, saved_len);
	MD5_Final((unsigned char*) crypt_key, &ctx);
#endif
	return count;
}

/* get_hash_N: return the first digest word of lane `index`, masked to the
 * table size PH_MASK_N.  The SIMD variants must mirror the interleaved
 * layout used in cmp_one(). */
#ifdef SIMD_COEF_32
static int get_hash_0(int index)
{
	unsigned int x,y;
	x = index&(SIMD_COEF_32-1);
	y = (unsigned int)index/SIMD_COEF_32;
	return ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4] & PH_MASK_0;
}
static int get_hash_1(int index)
{
	unsigned int x,y;
	x = index&(SIMD_COEF_32-1);
	y = (unsigned int)index/SIMD_COEF_32;
	return ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4] & PH_MASK_1;
}
static int get_hash_2(int index)
{
	unsigned int x,y;
	x = index&(SIMD_COEF_32-1);
	y = (unsigned int)index/SIMD_COEF_32;
	return ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4] & PH_MASK_2;
}
static int get_hash_3(int index)
{
	unsigned int x,y;
	x = index&(SIMD_COEF_32-1);
	y = (unsigned int)index/SIMD_COEF_32;
	return ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4] & PH_MASK_3;
}
static int get_hash_4(int index)
{
	unsigned int x,y;
	x = index&(SIMD_COEF_32-1);
	y = (unsigned int)index/SIMD_COEF_32;
	return ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4] & PH_MASK_4;
}
static int get_hash_5(int index)
{
	unsigned int x,y;
	x = index&(SIMD_COEF_32-1);
	y = (unsigned int)index/SIMD_COEF_32;
	return ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4] & PH_MASK_5;
}
static int get_hash_6(int index)
{
	unsigned int x,y;
	x = index&(SIMD_COEF_32-1);
	y = (unsigned int)index/SIMD_COEF_32;
	return ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4] & PH_MASK_6;
}
#else
static int get_hash_0(int index) { return ((uint32_t*)crypt_key)[index] & PH_MASK_0; }
static int get_hash_1(int index) { return ((uint32_t*)crypt_key)[index] & PH_MASK_1; }
static int get_hash_2(int index) { return ((uint32_t*)crypt_key)[index] & PH_MASK_2; }
static int get_hash_3(int index) { return ((uint32_t*)crypt_key)[index] & PH_MASK_3; }
static int get_hash_4(int index) { return ((uint32_t*)crypt_key)[index] & PH_MASK_4; }
static int get_hash_5(int index) { return ((uint32_t*)crypt_key)[index] & PH_MASK_5; }
static int get_hash_6(int index) { return ((uint32_t*)crypt_key)[index] & PH_MASK_6; }
#endif

/* Format registration: the params block, then the method table.
 * NOTE(review): set_key (ISO-8859-1) is registered here; set_key_CP /
 * set_key_utf8 are presumably swapped in by init() depending on the
 * --encoding option — confirm against the part of the file above this
 * chunk. */
struct fmt_main fmt_rawmd5uthick = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#if (BLOCK_LOOPS > 1) && defined(SSE_MD5_PARA)
		FMT_OMP |
#endif
		FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_SPLIT_UNIFIES_CASE,
		{ NULL },
		{ NULL },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
sviper.h
#pragma once #include <cmath> #include <chrono> #include <limits> #include <sstream> #include <thread> #include <vector> #include <sviper/auxiliary.h> #include <sviper/basics.h> #include <sviper/config.h> #include <sviper/evaluate_final_mapping.h> #include <sviper/merge_split_alignments.h> #include <sviper/polishing.h> #include <sviper/variant.h> #include <seqan/align.h> #include <seqan/arg_parse.h> #include <seqan/bam_io.h> #include <seqan/sequence.h> #include <seqan/seq_io.h> #include <seqan/graph_msa.h> namespace sviper { seqan::ArgumentParser::ParseResult parseCommandLine(CmdOptions & options, int argc, char const ** argv) { // Setup ArgumentParser. seqan::ArgumentParser parser("SViper"); setVersion(parser, "2.0.0"); seqan::addOption(parser, seqan::ArgParseOption( "c", "candidate-vcf", "A structural variant vcf file (with e.g. <DEL> tags), containing the potential variant sites to be looked at.", seqan::ArgParseArgument::INPUT_FILE, "VCF_FILE")); seqan::addOption(parser, seqan::ArgParseOption( "s", "short-read-bam", "The indexed bam file containing short used for polishing at variant sites.", seqan::ArgParseArgument::INPUT_FILE, "BAM_FILE")); seqan::addOption(parser, seqan::ArgParseOption( "l", "long-read-bam", "The indexed bam file containing long reads to be polished at variant sites.", seqan::ArgParseArgument::INPUT_FILE, "BAM_FILE")); seqan::addOption(parser, seqan::ArgParseOption( "r", "reference", "The indexed (fai) reference file.", seqan::ArgParseArgument::INPUT_FILE, "FA_FILE")); seqan::addOption(parser, seqan::ArgParseOption( "t", "threads", "The threads to use.", seqan::ArgParseArgument::INTEGER, "INT")); seqan::addOption(parser, seqan::ArgParseOption( "k", "flanking-region", "The flanking region in bp's around a breakpoint to be considered for polishing", seqan::ArgParseArgument::INTEGER, "INT")); seqan::addOption(parser, seqan::ArgParseOption( "x", "coverage-short-reads", "The original short read mean coverage. 
This value is used to restrict short read coverage on extraction to avoid mapping bias", seqan::ArgParseArgument::INTEGER, "INT")); seqan::addOption(parser, seqan::ArgParseOption( "", "median-ins-size-short-reads", "The median of the short read insert size (end of read1 until beginning of read2). " "This value is used to compute a threshold for error correction.", seqan::ArgParseArgument::INTEGER, "INT")); seqan::addOption(parser, seqan::ArgParseOption( "", "stdev-ins-size-short-reads", "The median of the short read insert size (end of read1 until beginning of read2). " "This value is used to compute a threshold for error correction..", seqan::ArgParseArgument::INTEGER, "INT")); seqan::addOption(parser, seqan::ArgParseOption( "o", "output-prefix", "A name for the output files. The current output is a log file and vcf file, that contains the " "polished sequences for each variant.", seqan::ArgParseArgument::INPUT_FILE, "PREFIX")); seqan::addOption(parser, seqan::ArgParseOption( "v", "verbose", "Turn on detailed information about the process.")); seqan::addOption(parser, seqan::ArgParseOption( "", "output-polished-bam", "For debugging or manual inspection the polished reads can be written to a file.")); seqan::setRequired(parser, "c"); seqan::setRequired(parser, "l"); seqan::setRequired(parser, "s"); seqan::setRequired(parser, "r"); seqan::setMinValue(parser, "k", "50"); seqan::setMaxValue(parser, "k", "1000"); seqan::setDefaultValue(parser, "k", "400"); seqan::setDefaultValue(parser, "t", std::thread::hardware_concurrency()); // Parse command line. seqan::ArgumentParser::ParseResult res = seqan::parse(parser, argc, argv); // Only extract options if the program will continue after parseCommandLine() if (res != seqan::ArgumentParser::PARSE_OK) return res; // Extract option values. 
seqan::getOptionValue(options.candidate_file_name, parser, "candidate-vcf"); seqan::getOptionValue(options.long_read_file_name, parser, "long-read-bam"); seqan::getOptionValue(options.short_read_file_name, parser, "short-read-bam"); seqan::getOptionValue(options.reference_file_name, parser, "reference"); seqan::getOptionValue(options.output_prefix, parser, "output-prefix"); seqan::getOptionValue(options.flanking_region, parser, "flanking-region"); seqan::getOptionValue(options.mean_coverage_of_short_reads, parser, "coverage-short-reads"); seqan::getOptionValue(options.mean_insert_size_of_short_reads, parser, "median-ins-size-short-reads"); seqan::getOptionValue(options.stdev_insert_size_of_short_reads, parser, "stdev-ins-size-short-reads"); seqan::getOptionValue(options.threads, parser, "threads"); options.verbose = isSet(parser, "verbose"); options.output_polished_bam = isSet(parser, "output-polished-bam"); if (options.output_prefix.empty()) options.output_prefix = options.candidate_file_name + "_polished"; return seqan::ArgumentParser::PARSE_OK; } bool polish_variant(Variant & var, input_output_information & info) { std::stringstream localLog{}; seqan::BamFileIn & short_read_bam = *(info.short_read_file_handles[omp_get_thread_num()]); seqan::BamFileIn & long_read_bam = *(info.long_read_file_handles[omp_get_thread_num()]); seqan::FaiIndex & faiIndex = *(info.faidx_file_handles[omp_get_thread_num()]); if (var.sv_type != SV_TYPE::DEL && var.sv_type != SV_TYPE::INS) { #pragma omp critical info.log_file << "----------------------------------------------------------------------" << std::endl << " SKIP Variant " << var.id << " at " << var.ref_chrom << ":" << var.ref_pos << " " << var.alt_seq << " L:" << var.sv_length << std::endl << "----------------------------------------------------------------------" << std::endl; var.filter = "SKIP"; return false; } if (var.sv_length > 1000000) { #pragma omp critical info.log_file << 
"----------------------------------------------------------------------" << std::endl << " SKIP too long Variant " << var.ref_chrom << ":" << var.ref_pos << " " << var.alt_seq << " L:" << var.sv_length << std::endl << "----------------------------------------------------------------------" << std::endl; var.filter = "SKIP"; return false; } localLog << "----------------------------------------------------------------------" << std::endl << " PROCESS Variant " << var.id << " at " << var.ref_chrom << ":" << var.ref_pos << " " << var.alt_seq << " L:" << var.sv_length << std::endl << "----------------------------------------------------------------------" << std::endl; // Compute reference length, start and end position of region of interest // --------------------------------------------------------------------- unsigned ref_fai_idx = 0; if (!seqan::getIdByName(ref_fai_idx, faiIndex, var.ref_chrom)) { localLog << "[ ERROR ]: FAI index has no entry for reference name" << var.ref_chrom << std::endl; #pragma omp critical info.log_file << localLog.str() << std::endl; var.filter = "FAIL0"; return false; } // cash variables to avoid recomputing // Note that the positions are one based/ since the VCF format is one based int const ref_length = seqan::sequenceLength(faiIndex, ref_fai_idx); int const ref_region_start = std::max(1, var.ref_pos - info.cmd_options.flanking_region); int const ref_region_end = std::min(ref_length, var.ref_pos_end + info.cmd_options.flanking_region); int const var_ref_pos_add50 = std::min(ref_length, var.ref_pos + 50); int const var_ref_pos_sub50 = std::max(1, var.ref_pos - 50); int const var_ref_pos_end_add50 = std::min(ref_length, var.ref_pos_end + 50); int const var_ref_pos_end_sub50 = std::max(1, var.ref_pos_end - 50); localLog << "--- Reference region " << var.ref_chrom << ":" << ref_region_start << "-" << ref_region_end << std::endl; SEQAN_ASSERT_LEQ(ref_region_start, ref_region_end); // get reference id in bam File unsigned rID_short{}; 
unsigned rID_long{}; if (!seqan::getIdByName(rID_short, seqan::contigNamesCache(seqan::context(short_read_bam)), var.ref_chrom)) { localLog << "[ ERROR ]: No reference sequence named " << var.ref_chrom << " in short read bam file." << std::endl; var.filter = "FAIL6"; #pragma omp critical info.log_file << localLog.str() << std::endl; return false; } if (!seqan::getIdByName(rID_long, seqan::contigNamesCache(seqan::context(long_read_bam)), var.ref_chrom)) { localLog << "[ ERROR ]: No reference sequence named " << var.ref_chrom << " in long read bam file." << std::endl; var.filter = "FAIL7"; #pragma omp critical info.log_file << localLog.str() << std::endl; return false; } // Extract long reads // --------------------------------------------------------------------- std::vector<seqan::BamAlignmentRecord> supporting_records{}; { std::vector<seqan::BamAlignmentRecord> long_reads{}; // extract overlapping the start breakpoint +-50 bp's seqan::viewRecords(long_reads, long_read_bam, info.long_read_bai, rID_long, var_ref_pos_sub50, var_ref_pos_add50); // extract overlapping the end breakpoint +-50 bp's seqan::viewRecords(long_reads, long_read_bam, info.long_read_bai, rID_long, var_ref_pos_end_sub50, var_ref_pos_end_add50); if (long_reads.size() == 0) { localLog << "ERROR1: No long reads in reference region " << var.ref_chrom << ":" << var_ref_pos_sub50 << "-" << var_ref_pos_add50 << " or " << var.ref_chrom << ":" << var_ref_pos_end_sub50 << "-" << var_ref_pos_end_add50 << std::endl; var.filter = "FAIL1"; #pragma omp critical info.log_file << localLog.str() << std::endl; return false; } localLog << "--- Extracted " << long_reads.size() << " long read(s). May include duplicates. " << std::endl; // Search for supporting reads // --------------------------------------------------------------------- // TODO check if var is not empty! 
localLog << "--- Searching in (reference) region [" << (int)(var.ref_pos - DEV_POS * var.sv_length) << "-" << (int)(var.ref_pos + var.sv_length + DEV_POS * var.sv_length) << "]" << " for a variant of type " << var.alt_seq << " of length " << (int)(var.sv_length - DEV_SIZE * var.sv_length) << "-" << (int)(var.sv_length + DEV_SIZE * var.sv_length) << " bp's" << std::endl; for (auto const & rec : long_reads) if (record_supports_variant(rec, var) && length(rec.seq) > 0 /*sequence information is given*/) supporting_records.push_back(rec); if (supporting_records.size() == 0) { localLog << "--- No supporting reads that span the variant, start merging..." << std::endl; // Merge supplementary alignments to primary // --------------------------------------------------------------------- std::sort(long_reads.begin(), long_reads.end(), bamRecordNameLess()); long_reads = merge_alignments(long_reads); // next to merging this will also get rid of duplicated reads localLog << "--- After merging " << long_reads.size() << " read(s) remain(s)." << std::endl; for (auto const & rec : long_reads) if (record_supports_variant(rec, var) && length(rec.seq) > 0 /*sequence information is given*/) supporting_records.push_back(rec); if (supporting_records.size() == 0) // there are none at all { localLog << "ERROR2: No supporting long reads for a " << var.alt_seq << " in region " << var.ref_chrom << ":" << var_ref_pos_sub50 << "-" << var_ref_pos_end_add50 << std::endl; var.filter = "FAIL2"; #pragma omp critical info.log_file << localLog.str() << std::endl; return false; } } else { // remove duplicates std::sort(supporting_records.begin(), supporting_records.end(), bamRecordNameLess()); auto last = std::unique(supporting_records.begin(), supporting_records.end(), bamRecordEqual()); supporting_records.erase(last, supporting_records.end()); } localLog << "--- After searching for variant " << supporting_records.size() << " supporting read(s) remain." 
<< std::endl; } // scope of long_reads ends // Crop fasta sequence of each supporting read for consensus // --------------------------------------------------------------------- localLog << "--- Cropping long reads with a buffer of +-" << info.cmd_options.flanking_region << " around variants." << std::endl; seqan::StringSet<seqan::Dna5String> supporting_sequences{}; std::vector<seqan::BamAlignmentRecord>::size_type maximum_long_reads = 5; // sort records such that the highest quality ones are chosen first std::sort(supporting_records.begin(), supporting_records.end(), bamRecordMapQGreater()); for (unsigned i = 0; i < std::min(maximum_long_reads, supporting_records.size()); ++i) { auto region = get_read_region_boundaries(supporting_records[i], ref_region_start, ref_region_end); assert(std::get<0>(region) >= 0); assert(std::get<1>(region) >= 0); assert(std::get<0>(region) <= std::get<1>(region)); assert(std::get<1>(region) - std::get<0>(region) <= static_cast<int>(length(supporting_records[i].seq))); seqan::Dna5String reg = seqan::infix(supporting_records[i].seq, std::get<0>(region), std::get<1>(region)); // For deletions, the expected size of the subsequence is that of // the flanking region, since the rest is deleted. For insertions it // is that of the flanking region + the insertion length. 
int32_t expected_length{2*info.cmd_options.flanking_region}; if (var.sv_type == SV_TYPE::INS) expected_length += var.sv_length; if (abs(static_cast<int32_t>(seqan::length(reg)) - expected_length) > info.cmd_options.flanking_region) { localLog << "------ Skip Read - Length:" << seqan::length(reg) << " Qual:" << supporting_records[i].mapQ << " Name: "<< supporting_records[i].qName << std::endl; ++maximum_long_reads; return false; // do not use under or oversized region } seqan::appendValue(supporting_sequences, reg); localLog << "------ Region: [" << std::get<0>(region) << "-" << std::get<1>(region) << "] Length:" << seqan::length(reg) << " Qual:" << supporting_records[i].mapQ << " Name: "<< supporting_records[i].qName << std::endl; } if (seqan::length(supporting_sequences) == 0) { localLog << "ERROR3: No fitting regions for a " << var.alt_seq << " in region " << var.ref_chrom << ":" << var_ref_pos_sub50 << "-" << var_ref_pos_end_add50 << std::endl; var.filter = "FAIL3"; #pragma omp critical info.log_file << localLog.str() << std::endl; return false; } // Build consensus of supporting read regions // --------------------------------------------------------------------- std::vector<double> mapping_qualities{}; mapping_qualities.resize(supporting_records.size()); for (unsigned i = 0; i < supporting_records.size(); ++i) mapping_qualities[i] = (supporting_records[i]).mapQ; seqan::Dna5String cns = build_consensus(supporting_sequences, mapping_qualities); localLog << "--- Built a consensus with a MSA of length " << seqan::length(cns) << "." 
<< std::endl; seqan::Dna5String polished_ref{}; SViperConfig config{info.cmd_options}; config.ref_flank_length = 500; { seqan::StringSet<seqan::Dna5QString> short_reads_1{}; // reads (first in pair) seqan::StringSet<seqan::Dna5QString> short_reads_2{}; // mates (second in pair) { // Extract short reads in region // --------------------------------------------------------------------- std::vector<seqan::BamAlignmentRecord> short_reads{}; // If the breakpoints are farther apart then illumina-read-length + 2 * flanking-region, // then extract reads for each break point separately. if (ref_region_end - ref_region_start > info.cmd_options.flanking_region * 2 + info.cmd_options.length_of_short_reads) { // extract reads left of the start of the variant [start-flanking_region, start+flanking_region] unsigned e = std::min(ref_length, var.ref_pos + info.cmd_options.flanking_region); seqan::viewRecords(short_reads, short_read_bam, info.short_read_bai, rID_short, ref_region_start, e); cut_down_high_coverage(short_reads, info.cmd_options.mean_coverage_of_short_reads); // and right of the end of the variant [end-flanking_region, end+flanking_region] std::vector<seqan::BamAlignmentRecord> tmp_short_reads; unsigned s = std::max(1, var.ref_pos_end - info.cmd_options.flanking_region); seqan::viewRecords(tmp_short_reads, short_read_bam, info.short_read_bai, rID_short, s, ref_region_end); cut_down_high_coverage(tmp_short_reads, info.cmd_options.mean_coverage_of_short_reads); append(short_reads, tmp_short_reads); } else { // extract reads left of the start of the variant [start-flanking_region, start] seqan::viewRecords(short_reads, short_read_bam, info.short_read_bai, rID_short, ref_region_start, ref_region_end); cut_down_high_coverage(short_reads, info.cmd_options.mean_coverage_of_short_reads); } if (short_reads.size() < 20) { localLog << "ERROR4: Not enough short reads (only " << short_reads.size() << ") for variant of type " << var.alt_seq << " in region " << var.ref_chrom << ":" 
<< ref_region_start << "-" << ref_region_end << std::endl; var.filter = "FAIL4"; #pragma omp critical info.log_file << localLog.str() << std::endl; return false; } records_to_read_pairs(short_reads_1, short_reads_2, short_reads, short_read_bam, info.short_read_bai); localLog << "--- Extracted " << seqan::length(short_reads_1) << " pairs (proper or dummy pairs)." << std::endl; } // scope of short reads ends // Flank consensus sequence // --------------------------------------------------------------------- // Before polishing, append a reference flank to the conesnsus such that // the reads find a high quality anchor for mapping and pairs are correctly // identified. seqan::Dna5String flanked_consensus = append_ref_flanks(cns, faiIndex, ref_fai_idx, ref_length, ref_region_start, ref_region_end, config.ref_flank_length); // Polish flanked consensus sequence with short reads // --------------------------------------------------------------------- compute_baseQ_stats(config, short_reads_1, short_reads_2); //TODO:: return qualities and assign to config outside localLog << "--- Short read base qualities: avg=" << config.baseQ_mean << " stdev=" << config.baseQ_std << "." << std::endl; polished_ref = polish_to_perfection(short_reads_1, short_reads_2, flanked_consensus, config); localLog << "DONE POLISHING: Total of " << config.substituted_bases << " substituted, " << config.deleted_bases << " deleted and " << config.inserted_bases << " inserted bases. " << config.rounds << " rounds." 
<< std::endl; } // scope of short_reads1 and short_reads2 ends for (unsigned i = config.ref_flank_length; i < seqan::length(config.cov_profile) - config.ref_flank_length; ++i) localLog << config.cov_profile[i] << " "; localLog << std::endl; // Align polished sequence to reference // --------------------------------------------------------------------- seqan::Dna5String ref_part{}; seqan::readRegion(ref_part, faiIndex, ref_fai_idx, std::max(1u, ref_region_start - config.ref_flank_length), std::min(ref_region_end + config.ref_flank_length, static_cast<unsigned>(ref_length))); typedef seqan::Gaps<seqan::Dna5String, seqan::ArrayGaps> TGapsRead; typedef seqan::Gaps<seqan::Dna5String, seqan::ArrayGaps> TGapsRef; TGapsRef gapsRef(ref_part); TGapsRead gapsSeq(polished_ref); seqan::globalAlignment(gapsRef, gapsSeq, seqan::Score<double, seqan::Simple>(config.MM, config.MX, config.GE, config.GO), seqan::AlignConfig<false, false, false, false>(), seqan::ConvexGaps()); seqan::BamAlignmentRecord final_record{}; final_record.beginPos = std::max(0u, ref_region_start - config.ref_flank_length); final_record.seq = polished_ref; seqan::getIdByName(final_record.rID, seqan::contigNamesCache(seqan::context(long_read_bam)), var.ref_chrom); seqan::getCigarString(final_record.cigar, gapsRef, gapsSeq, std::numeric_limits<unsigned>::max()); // std::numeric_limits<unsigned>::max() because in function getCigarString the value is compared to type unsigned // And we never want replace a Deletions D with N (short read exon identification) // Evaluate Alignment // --------------------------------------------------------------------- // If refine_variant fails, the variant is not supported anymore but remains unchanged. 
// If not, the variant now might have different start/end positions and other information assign_quality(final_record, var, config); // assigns a score to record.mapQ and var.quality if (!refine_variant(final_record, var)) { var.filter = "FAIL5"; //assign_quality(record, var, false); localLog << "ERROR5: \"Polished away\" variant " << var.id << " at " << var.ref_chrom << ":" << var.ref_pos << "\t" << var.alt_seq << "\tScore:\t" << var.quality << std::endl; } else { //assign_quality(record, var, true); localLog << "SUCCESS: Polished variant " << var.id << " at " << var.ref_chrom << ":" << var.ref_pos << "\t" << var.alt_seq << "\tScore:\t" << var.quality << std::endl; } if (info.cmd_options.output_polished_bam) { std::string read_identifier = (std::string("polished_var") + ":" + var.ref_chrom + ":" + std::to_string(var.ref_pos) + ":" + std::to_string(var.ref_pos_end) + ":" + var.id); final_record.qName = read_identifier; #pragma omp critical info.polished_reads.push_back(final_record); } #pragma omp critical info.log_file << localLog.str() << std::endl; return true; } } // namespace sviper
convolution_3x3.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; int nn_outch = outch >> 1; int remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p + 1); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? 
bias[p + 1] : 0.f; out0.fill(bias0); out1.fill(bias1); const float* k0 = kernel + p * inch * 9; const float* k1 = kernel + (p + 1) * inch * 9; for (int q = 0; q < inch; q++) { float* outptr0 = out0; float* outptr1 = out1; float* outptr0n = outptr0 + outw; float* outptr1n = outptr1 + outw; const float* img0 = bottom_blob.channel(q); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; const float* r3 = img0 + w * 3; #if __ARM_NEON float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k03 = vld1q_f32(k0 + 3); float32x4_t _k06 = vld1q_f32(k0 + 6); float32x4_t _k10 = vld1q_f32(k1); float32x4_t _k13 = vld1q_f32(k1 + 3); float32x4_t _k16 = vld1q_f32(k1 + 6); #endif // __ARM_NEON int i = 0; for (; i + 1 < outh; i += 2) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%5, #256] \n" "ld1 {v8.4s, v9.4s}, [%5] \n" // r0 "add %5, %5, #16 \n" "prfm pldl1keep, [%8, #256] \n" "ld1 {v14.4s, v15.4s}, [%8] \n" // r3 "add %8, %8, #16 \n" "ext v10.16b, v8.16b, v9.16b, #4 \n" "ext v11.16b, v14.16b, v15.16b, #8 \n" "0: \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v6.4s}, [%1] \n" // _sum0 "prfm pldl1keep, [%2, #128] \n" "ld1 {v7.4s}, [%2] \n" // _sum1 "fmla v6.4s, v8.4s, %18.s[0] \n" "fmla v7.4s, v8.4s, %21.s[0] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v12.4s}, [%3] \n" // _sum0n "prfm pldl1keep, [%4, #128] \n" "ld1 {v13.4s}, [%4] \n" // _sum1n "fmla v12.4s, v14.4s, %20.s[0] \n" "fmla v13.4s, v14.4s, %23.s[0] \n" "ext v8.16b, v8.16b, v9.16b, #8 \n" "ext v9.16b, v14.16b, v15.16b, #4 \n" "fmla v6.4s, v10.4s, %18.s[1] \n" "fmla v7.4s, v10.4s, %21.s[1] \n" "fmla v12.4s, v11.4s, %20.s[2] \n" "fmla v13.4s, v11.4s, %23.s[2] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v14.4s, v15.4s}, [%6] \n" // r1 "add %6, %6, #16 \n" "fmla v6.4s, v8.4s, %18.s[2] \n" "fmla v7.4s, v8.4s, %21.s[2] \n" "fmla v12.4s, v9.4s, %20.s[1] \n" "fmla v13.4s, v9.4s, 
%23.s[1] \n" "ext v10.16b, v14.16b, v15.16b, #4 \n" "fmla v6.4s, v14.4s, %19.s[0] \n" "fmla v7.4s, v14.4s, %22.s[0] \n" "fmla v12.4s, v14.4s, %18.s[0] \n" "fmla v13.4s, v14.4s, %21.s[0] \n" "ext v11.16b, v14.16b, v15.16b, #8 \n" "fmla v6.4s, v10.4s, %19.s[1] \n" "fmla v7.4s, v10.4s, %22.s[1] \n" "fmla v12.4s, v10.4s, %18.s[1] \n" "fmla v13.4s, v10.4s, %21.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v8.4s, v9.4s}, [%7] \n" // r2 "add %7, %7, #16 \n" "fmla v6.4s, v11.4s, %19.s[2] \n" "fmla v7.4s, v11.4s, %22.s[2] \n" "fmla v12.4s, v11.4s, %18.s[2] \n" "fmla v13.4s, v11.4s, %21.s[2] \n" "ext v10.16b, v8.16b, v9.16b, #4 \n" "fmla v6.4s, v8.4s, %20.s[0] \n" "fmla v7.4s, v8.4s, %23.s[0] \n" "fmla v12.4s, v8.4s, %19.s[0] \n" "fmla v13.4s, v8.4s, %22.s[0] \n" "ext v11.16b, v8.16b, v9.16b, #8 \n" "fmla v6.4s, v10.4s, %20.s[1] \n" "fmla v7.4s, v10.4s, %23.s[1] \n" "fmla v12.4s, v10.4s, %19.s[1] \n" "fmla v13.4s, v10.4s, %22.s[1] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v8.4s, v9.4s}, [%5] \n" // r0 "add %5, %5, #16 \n" "fmla v6.4s, v11.4s, %20.s[2] \n" "fmla v7.4s, v11.4s, %23.s[2] \n" "fmla v12.4s, v11.4s, %19.s[2] \n" "fmla v13.4s, v11.4s, %22.s[2] \n" "prfm pldl1keep, [%8, #256] \n" "ld1 {v14.4s, v15.4s}, [%8] \n" // r3 "add %8, %8, #16 \n" "ext v10.16b, v8.16b, v9.16b, #4 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v7.4s}, [%2], #16 \n" "ext v11.16b, v14.16b, v15.16b, #8 \n" "st1 {v12.4s}, [%3], #16 \n" "st1 {v13.4s}, [%4], #16 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" "sub %5, %5, #16 \n" "sub %8, %8, #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr0n), // %3 "=r"(outptr1n), // %4 "=r"(r0), // %5 "=r"(r1), // %6 "=r"(r2), // %7 "=r"(r3) // %8 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr0n), "4"(outptr1n), "5"(r0), "6"(r1), "7"(r2), "8"(r3), "w"(_k00), // %18 "w"(_k03), // %19 "w"(_k06), // %20 "w"(_k10), // %21 "w"(_k13), // %22 "w"(_k16) // %23 : "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); } #else if 
(nn > 0) { asm volatile( "pld [%5, #192] \n" "vld1.f32 {d16-d18}, [%5 :64] \n" // r0 "add %5, #16 \n" "pld [%8, #192] \n" "vld1.f32 {d28-d30}, [%8] \n" // r3 "add %8, #16 \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q14, q15, #2 \n" "0: \n" "pld [%1, #128] \n" "vld1.f32 {d12-d13}, [%1 :64] \n" // _sum0 "pld [%2, #128] \n" "vld1.f32 {d14-d15}, [%2 :64] \n" // _sum1 "vmla.f32 q6, q8, %e18[0] \n" "vmla.f32 q7, q8, %e21[0] \n" "pld [%3, #128] \n" "vld1.f32 {d24-d25}, [%3] \n" // _sum0n "pld [%4, #128] \n" "vld1.f32 {d26-d27}, [%4] \n" // _sum1n "vmla.f32 q12, q14, %e20[0] \n" "vmla.f32 q13, q14, %e23[0] \n" "vext.32 q8, q8, q9, #2 \n" "vext.32 q9, q14, q15, #1 \n" "vmla.f32 q6, q10, %e18[1] \n" "vmla.f32 q7, q10, %e21[1] \n" "vmla.f32 q12, q11, %f20[0] \n" "vmla.f32 q13, q11, %f23[0] \n" "pld [%6, #192] \n" "vld1.f32 {d28-d30}, [%6] \n" // r1 "add %6, #16 \n" "vmla.f32 q6, q8, %f18[0] \n" "vmla.f32 q7, q8, %f21[0] \n" "vmla.f32 q12, q9, %e20[1] \n" "vmla.f32 q13, q9, %e23[1] \n" "vext.32 q10, q14, q15, #1 \n" "vmla.f32 q6, q14, %e19[0] \n" "vmla.f32 q7, q14, %e22[0] \n" "vmla.f32 q12, q14, %e18[0] \n" "vmla.f32 q13, q14, %e21[0] \n" "vext.32 q11, q14, q15, #2 \n" "vmla.f32 q6, q10, %e19[1] \n" "vmla.f32 q7, q10, %e22[1] \n" "vmla.f32 q12, q10, %e18[1] \n" "vmla.f32 q13, q10, %e21[1] \n" "pld [%7, #192] \n" "vld1.f32 {d16-d18}, [%7 :64] \n" // r2 "add %7, #16 \n" "vmla.f32 q6, q11, %f19[0] \n" "vmla.f32 q7, q11, %f22[0] \n" "vmla.f32 q12, q11, %f18[0] \n" "vmla.f32 q13, q11, %f21[0] \n" "vext.32 q10, q8, q9, #1 \n" "vmla.f32 q6, q8, %e20[0] \n" "vmla.f32 q7, q8, %e23[0] \n" "vmla.f32 q12, q8, %e19[0] \n" "vmla.f32 q13, q8, %e22[0] \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q6, q10, %e20[1] \n" "vmla.f32 q7, q10, %e23[1] \n" "vmla.f32 q12, q10, %e19[1] \n" "vmla.f32 q13, q10, %e22[1] \n" "pld [%5, #192] \n" "vld1.f32 {d16-d18}, [%5 :64] \n" // r0 "add %5, #16 \n" "vmla.f32 q6, q11, %f20[0] \n" "vmla.f32 q7, q11, %f23[0] \n" "vmla.f32 q12, q11, %f19[0] \n" "vmla.f32 
q13, q11, %f22[0] \n" "pld [%8, #192] \n" "vld1.f32 {d28-d30}, [%8] \n" // r3 "add %8, #16 \n" "vext.32 q10, q8, q9, #1 \n" "vst1.f32 {d12-d13}, [%1 : 64]!\n" "vst1.f32 {d14-d15}, [%2 : 64]!\n" "vext.32 q11, q14, q15, #2 \n" "vst1.f32 {d24-d25}, [%3]! \n" "vst1.f32 {d26-d27}, [%4]! \n" "subs %0, #1 \n" "bne 0b \n" "sub %5, #16 \n" "sub %8, #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr0n), // %3 "=r"(outptr1n), // %4 "=r"(r0), // %5 "=r"(r1), // %6 "=r"(r2), // %7 "=r"(r3) // %8 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr0n), "4"(outptr1n), "5"(r0), "6"(r1), "7"(r2), "8"(r3), "w"(_k00), // %18 "w"(_k03), // %19 "w"(_k06), // %20 "w"(_k10), // %21 "w"(_k13), // %22 "w"(_k16) // %23 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _sum0 = vmulq_f32(_r00, _k00); float32x4_t _sum1 = vmulq_f32(_r00, _k10); _sum0 = vmlaq_f32(_sum0, _r10, _k03); _sum1 = vmlaq_f32(_sum1, _r10, _k13); _sum0 = vmlaq_f32(_sum0, _r20, _k06); _sum1 = vmlaq_f32(_sum1, _r20, _k16); float32x4_t _sum0n = vmulq_f32(_r10, _k00); float32x4_t _sum1n = vmulq_f32(_r10, _k10); _sum0n = vmlaq_f32(_sum0n, _r20, _k03); _sum1n = vmlaq_f32(_sum1n, _r20, _k13); _sum0n = vmlaq_f32(_sum0n, _r30, _k06); _sum1n = vmlaq_f32(_sum1n, _r30, _k16); _sum0 = vsetq_lane_f32(*outptr0, _sum0, 3); _sum1 = vsetq_lane_f32(*outptr1, _sum1, 3); _sum0n = vsetq_lane_f32(*outptr0n, _sum0n, 3); _sum1n = vsetq_lane_f32(*outptr1n, _sum1n, 3); #if __aarch64__ *outptr0 = vaddvq_f32(_sum0); *outptr1 = vaddvq_f32(_sum1); *outptr0n = vaddvq_f32(_sum0n); *outptr1n = vaddvq_f32(_sum1n); #else float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1)); 
float32x2_t _ss0n = vadd_f32(vget_low_f32(_sum0n), vget_high_f32(_sum0n)); float32x2_t _ss1n = vadd_f32(vget_low_f32(_sum1n), vget_high_f32(_sum1n)); float32x2_t _ss01 = vpadd_f32(_ss0, _ss1); float32x2_t _ss01n = vpadd_f32(_ss0n, _ss1n); *outptr0 = vget_lane_f32(_ss01, 0); *outptr1 = vget_lane_f32(_ss01, 1); *outptr0n = vget_lane_f32(_ss01n, 0); *outptr1n = vget_lane_f32(_ss01n, 1); #endif // __aarch64__ #else float sum0 = 0.f; float sum0n = 0.f; float sum1 = 0.f; float sum1n = 0.f; sum0 += r0[0] * k0[0]; sum0 += r0[1] * k0[1]; sum0 += r0[2] * k0[2]; sum0 += r1[0] * k0[3]; sum0 += r1[1] * k0[4]; sum0 += r1[2] * k0[5]; sum0 += r2[0] * k0[6]; sum0 += r2[1] * k0[7]; sum0 += r2[2] * k0[8]; sum1 += r0[0] * k1[0]; sum1 += r0[1] * k1[1]; sum1 += r0[2] * k1[2]; sum1 += r1[0] * k1[3]; sum1 += r1[1] * k1[4]; sum1 += r1[2] * k1[5]; sum1 += r2[0] * k1[6]; sum1 += r2[1] * k1[7]; sum1 += r2[2] * k1[8]; sum0n += r1[0] * k0[0]; sum0n += r1[1] * k0[1]; sum0n += r1[2] * k0[2]; sum0n += r2[0] * k0[3]; sum0n += r2[1] * k0[4]; sum0n += r2[2] * k0[5]; sum0n += r3[0] * k0[6]; sum0n += r3[1] * k0[7]; sum0n += r3[2] * k0[8]; sum1n += r1[0] * k1[0]; sum1n += r1[1] * k1[1]; sum1n += r1[2] * k1[2]; sum1n += r2[0] * k1[3]; sum1n += r2[1] * k1[4]; sum1n += r2[2] * k1[5]; sum1n += r3[0] * k1[6]; sum1n += r3[1] * k1[7]; sum1n += r3[2] * k1[8]; *outptr0 += sum0; *outptr1 += sum1; *outptr0n += sum0n; *outptr1n += sum1n; #endif // __ARM_NEON r0++; r1++; r2++; r3++; outptr0++; outptr1++; outptr0n++; outptr1n++; } r0 += 2 + w; r1 += 2 + w; r2 += 2 + w; r3 += 2 + w; outptr0 += outw; outptr1 += outw; outptr0n += outw; outptr1n += outw; } for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v8.4s, v9.4s}, [%3] \n" // r0 "add %3, %3, #16 \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v6.4s}, [%1] \n" // _sum0 "prfm pldl1keep, [%2, 
#128] \n" "ld1 {v7.4s}, [%2] \n" // _sum1 "fmul v14.4s, v8.4s, %12.s[0] \n" "fmul v15.4s, v8.4s, %15.s[0] \n" "ext v10.16b, v8.16b, v9.16b, #4 \n" "ext v11.16b, v8.16b, v9.16b, #8 \n" "fmla v6.4s, v10.4s, %12.s[1] \n" "fmla v7.4s, v10.4s, %15.s[1] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v8.4s, v9.4s}, [%4] \n" // r1 "add %4, %4, #16 \n" "fmla v14.4s, v11.4s, %12.s[2] \n" "fmla v15.4s, v11.4s, %15.s[2] \n" "fmla v6.4s, v8.4s, %13.s[0] \n" "fmla v7.4s, v8.4s, %16.s[0] \n" "ext v10.16b, v8.16b, v9.16b, #4 \n" "ext v11.16b, v8.16b, v9.16b, #8 \n" "fmla v14.4s, v10.4s, %13.s[1] \n" "fmla v15.4s, v10.4s, %16.s[1] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v8.4s, v9.4s}, [%5] \n" // r2 "add %5, %5, #16 \n" "fmla v6.4s, v11.4s, %13.s[2] \n" "fmla v7.4s, v11.4s, %16.s[2] \n" "fmla v14.4s, v8.4s, %14.s[0] \n" "fmla v15.4s, v8.4s, %17.s[0] \n" "ext v10.16b, v8.16b, v9.16b, #4 \n" "ext v11.16b, v8.16b, v9.16b, #8 \n" "fmla v6.4s, v10.4s, %14.s[1] \n" "fmla v7.4s, v10.4s, %17.s[1] \n" "fmla v14.4s, v11.4s, %14.s[2] \n" "fmla v15.4s, v11.4s, %17.s[2] \n" "fadd v6.4s, v6.4s, v14.4s \n" "fadd v7.4s, v7.4s, v15.4s \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v7.4s}, [%2], #16 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2) // %5 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(r0), "4"(r1), "5"(r2), "w"(_k00), // %12 "w"(_k03), // %13 "w"(_k06), // %14 "w"(_k10), // %15 "w"(_k13), // %16 "w"(_k16) // %17 : "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); } #else if (nn > 0) { asm volatile( "0: \n" "pld [%3, #192] \n" "vld1.f32 {d16-d18}, [%3] \n" // r0 "add %3, #16 \n" "pld [%1, #128] \n" "vld1.f32 {d12-d13}, [%1] \n" // _sum0 "pld [%2, #128] \n" "vld1.f32 {d14-d15}, [%2] \n" // _sum1 "vmul.f32 q14, q8, %e12[0] \n" "vmul.f32 q15, q8, %e15[0] \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q6, q10, %e12[1] \n" "vmla.f32 q7, q10, %e15[1] \n" "pld 
[%4, #192] \n" "vld1.f32 {d16-d18}, [%4] \n" // r1 "add %4, #16 \n" "vmla.f32 q14, q11, %f12[0] \n" "vmla.f32 q15, q11, %f15[0] \n" "vmla.f32 q6, q8, %e13[0] \n" "vmla.f32 q7, q8, %e16[0] \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q14, q10, %e13[1] \n" "vmla.f32 q15, q10, %e16[1] \n" "pld [%5, #192] \n" "vld1.f32 {d16-d18}, [%5] \n" // r2 "add %5, #16 \n" "vmla.f32 q6, q11, %f13[0] \n" "vmla.f32 q7, q11, %f16[0] \n" "vmla.f32 q14, q8, %e14[0] \n" "vmla.f32 q15, q8, %e17[0] \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q6, q10, %e14[1] \n" "vmla.f32 q7, q10, %e17[1] \n" "vmla.f32 q14, q11, %f14[0] \n" "vmla.f32 q15, q11, %f17[0] \n" "vadd.f32 q6, q6, q14 \n" "vadd.f32 q7, q7, q15 \n" "vst1.f32 {d12-d13}, [%1]! \n" "vst1.f32 {d14-d15}, [%2]! \n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2) // %5 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(r0), "4"(r1), "5"(r2), "w"(_k00), // %12 "w"(_k03), // %13 "w"(_k06), // %14 "w"(_k10), // %15 "w"(_k13), // %16 "w"(_k16) // %17 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _sum0 = vmulq_f32(_r00, _k00); float32x4_t _sum1 = vmulq_f32(_r00, _k10); _sum0 = vmlaq_f32(_sum0, _r10, _k03); _sum1 = vmlaq_f32(_sum1, _r10, _k13); _sum0 = vmlaq_f32(_sum0, _r20, _k06); _sum1 = vmlaq_f32(_sum1, _r20, _k16); _sum0 = vsetq_lane_f32(*outptr0, _sum0, 3); _sum1 = vsetq_lane_f32(*outptr1, _sum1, 3); #if __aarch64__ *outptr0 = vaddvq_f32(_sum0); *outptr1 = vaddvq_f32(_sum1); #else float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1)); float32x2_t _ss01 = vpadd_f32(_ss0, _ss1); *outptr0 
= vget_lane_f32(_ss01, 0); *outptr1 = vget_lane_f32(_ss01, 1); #endif // __aarch64__ #else float sum0 = 0.f; float sum1 = 0.f; sum0 += r0[0] * k0[0]; sum0 += r0[1] * k0[1]; sum0 += r0[2] * k0[2]; sum0 += r1[0] * k0[3]; sum0 += r1[1] * k0[4]; sum0 += r1[2] * k0[5]; sum0 += r2[0] * k0[6]; sum0 += r2[1] * k0[7]; sum0 += r2[2] * k0[8]; sum1 += r0[0] * k1[0]; sum1 += r0[1] * k1[1]; sum1 += r0[2] * k1[2]; sum1 += r1[0] * k1[3]; sum1 += r1[1] * k1[4]; sum1 += r1[2] * k1[5]; sum1 += r2[0] * k1[6]; sum1 += r2[1] * k1[7]; sum1 += r2[2] * k1[8]; *outptr0 += sum0; *outptr1 += sum1; #endif // __ARM_NEON r0++; r1++; r2++; outptr0++; outptr1++; } r0 += 2; r1 += 2; r2 += 2; } k0 += 9; k1 += 9; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); const float* kernel0 = kernel + p * inch * 9; for (int q = 0; q < inch; q++) { float* outptr = out; float* outptr2 = outptr + outw; const float* img0 = bottom_blob.channel(q); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; const float* r3 = img0 + w * 3; #if __ARM_NEON float32x4_t _k0123 = vld1q_f32(kernel0); float32x4_t _k3456 = vld1q_f32(kernel0 + 3); float32x4_t _k6789 = vld1q_f32(kernel0 + 6); #else const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; #endif // __ARM_NEON int i = 0; for (; i + 1 < outh; i += 2) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%3, #256] \n" "ld1 {v9.4s, v10.4s}, [%3] \n" // r0 "add %3, %3, #16 \n" "ext v11.16b, v9.16b, v10.16b, #4 \n" "ext v12.16b, v9.16b, v10.16b, #8 \n" "0: \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v7.4s}, [%1] \n" // _sum "fmla v7.4s, v9.4s, %14.s[0] \n" "fmul v6.4s, v11.4s, %14.s[1] \n" "fmul v13.4s, v12.4s, %14.s[2] \n" "prfm 
pldl1keep, [%4, #256] \n" "ld1 {v9.4s, v10.4s}, [%4] \n" // r1 "add %4, %4, #16 \n" "fmla v7.4s, v9.4s, %15.s[0] \n" "ext v11.16b, v9.16b, v10.16b, #4 \n" "ext v12.16b, v9.16b, v10.16b, #8 \n" "fmla v6.4s, v11.4s, %15.s[1] \n" "fmla v13.4s, v12.4s, %15.s[2] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v8.4s}, [%2] \n" // _sum2 "fmla v8.4s, v9.4s, %14.s[0] \n" "fmul v14.4s, v11.4s, %14.s[1] \n" "fmul v15.4s, v12.4s, %14.s[2] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v9.4s, v10.4s}, [%5] \n" // r2 "add %5, %5, #16 \n" "fmla v7.4s, v9.4s, %16.s[0] \n" "ext v11.16b, v9.16b, v10.16b, #4 \n" "ext v12.16b, v9.16b, v10.16b, #8 \n" "fmla v6.4s, v11.4s, %16.s[1] \n" "fmla v13.4s, v12.4s, %16.s[2] \n" "fmla v8.4s, v9.4s, %15.s[0] \n" "fmla v14.4s, v11.4s, %15.s[1] \n" "fmla v15.4s, v12.4s, %15.s[2] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v9.4s, v10.4s}, [%6] \n" // r3 "add %6, %6, #16 \n" "fmla v8.4s, v9.4s, %16.s[0] \n" "ext v11.16b, v9.16b, v10.16b, #4 \n" "ext v12.16b, v9.16b, v10.16b, #8 \n" "fmla v14.4s, v11.4s, %16.s[1] \n" "fmla v15.4s, v12.4s, %16.s[2] \n" "fadd v7.4s, v7.4s, v6.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v9.4s, v10.4s}, [%3] \n" // r0 "fadd v8.4s, v8.4s, v14.4s \n" "fadd v7.4s, v7.4s, v13.4s \n" "fadd v8.4s, v8.4s, v15.4s \n" "ext v11.16b, v9.16b, v10.16b, #4 \n" "ext v12.16b, v9.16b, v10.16b, #8 \n" "add %3, %3, #16 \n" "st1 {v7.4s}, [%1], #16 \n" "st1 {v8.4s}, [%2], #16 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" "sub %3, %3, #16 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(outptr2), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2), // %5 "=r"(r3) // %6 : "0"(nn), "1"(outptr), "2"(outptr2), "3"(r0), "4"(r1), "5"(r2), "6"(r3), "w"(_k0123), // %14 "w"(_k3456), // %15 "w"(_k6789) // %16 : "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); } #else if (nn > 0) { asm volatile( "pld [%3, #192] \n" "vld1.f32 {d18-d20}, [%3 :64] \n" // r0 "add %3, #16 \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "0: \n" "pld 
[%1, #128] \n" "vld1.f32 {d14-d15}, [%1 :64] \n" // _sum "vmla.f32 q7, q9, %e14[0] \n" "vmul.f32 q6, q11, %e14[1] \n" "vmul.f32 q13, q12, %f14[0] \n" "pld [%4, #192] \n" "vld1.f32 {d18-d20}, [%4] \n" // r1 "add %4, #16 \n" "vmla.f32 q7, q9, %e15[0] \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "vmla.f32 q6, q11, %e15[1] \n" "vmla.f32 q13, q12, %f15[0] \n" "pld [%2, #128] \n" "vld1.f32 {d16-d17}, [%2] \n" // _sum2 "vmla.f32 q8, q9, %e14[0] \n" "vmul.f32 q14, q11, %e14[1] \n" "vmul.f32 q15, q12, %f14[0] \n" "pld [%5, #192] \n" "vld1.f32 {d18-d20}, [%5 :64] \n" // r2 "add %5, #16 \n" "vmla.f32 q7, q9, %e16[0] \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "vmla.f32 q6, q11, %e16[1] \n" "vmla.f32 q13, q12, %f16[0] \n" "vmla.f32 q8, q9, %e15[0] \n" "vmla.f32 q14, q11, %e15[1] \n" "vmla.f32 q15, q12, %f15[0] \n" "pld [%6, #192] \n" "vld1.f32 {d18-d20}, [%6] \n" // r3 "add %6, #16 \n" "vmla.f32 q8, q9, %e16[0] \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "vmla.f32 q14, q11, %e16[1] \n" "vmla.f32 q15, q12, %f16[0] \n" "vadd.f32 q7, q7, q6 \n" "pld [%3, #192] \n" "vld1.f32 {d18-d20}, [%3 :64] \n" // r0 "vadd.f32 q8, q8, q14 \n" "vadd.f32 q7, q7, q13 \n" "vadd.f32 q8, q8, q15 \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "add %3, #16 \n" "vst1.f32 {d14-d15}, [%1]! \n" "vst1.f32 {d16-d17}, [%2]! 
\n" "subs %0, #1 \n" "bne 0b \n" "sub %3, #16 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(outptr2), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2), // %5 "=r"(r3) // %6 : "0"(nn), "1"(outptr), "2"(outptr2), "3"(r0), "4"(r1), "5"(r2), "6"(r3), "w"(_k0123), // %14 "w"(_k3456), // %15 "w"(_k6789) // %16 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _sum = vmulq_f32(_r00, _k0123); _sum = vmlaq_f32(_sum, _r10, _k3456); _sum = vmlaq_f32(_sum, _r20, _k6789); float32x4_t _sum2 = vmulq_f32(_r10, _k0123); _sum2 = vmlaq_f32(_sum2, _r20, _k3456); _sum2 = vmlaq_f32(_sum2, _r30, _k6789); _sum = vsetq_lane_f32(*outptr, _sum, 3); _sum2 = vsetq_lane_f32(*outptr2, _sum2, 3); #if __aarch64__ *outptr = vaddvq_f32(_sum); *outptr2 = vaddvq_f32(_sum2); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2)); float32x2_t _sss2 = vpadd_f32(_ss, _ss2); *outptr = vget_lane_f32(_sss2, 0); *outptr2 = vget_lane_f32(_sss2, 1); #endif // __aarch64__ #else float sum = 0; float sum2 = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum2 += r1[0] * k0[0]; sum2 += r1[1] * k0[1]; sum2 += r1[2] * k0[2]; sum2 += r2[0] * k1[0]; sum2 += r2[1] * k1[1]; sum2 += r2[2] * k1[2]; sum2 += r3[0] * k2[0]; sum2 += r3[1] * k2[1]; sum2 += r3[2] * k2[2]; *outptr += sum; *outptr2 += sum2; #endif r0++; r1++; r2++; r3++; outptr++; outptr2++; } r0 += 2 + w; r1 += 2 + w; r2 += 2 + w; r3 += 2 + w; outptr += outw; outptr2 += outw; } for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 
3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%2, #256] \n" "ld1 {v8.4s, v9.4s}, [%2] \n" // r0 "add %2, %2, #16 \n" "ext v10.16b, v8.16b, v9.16b, #4 \n" "ext v11.16b, v8.16b, v9.16b, #8 \n" "0: \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v7.4s}, [%1] \n" // _sum "fmla v7.4s, v8.4s, %10.s[0] \n" "fmul v13.4s, v10.4s, %10.s[1] \n" "fmul v14.4s, v11.4s, %10.s[2] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v8.4s, v9.4s}, [%3] \n" // r1 "add %3, %3, #16 \n" "fmla v7.4s, v8.4s, %11.s[0] \n" "ext v10.16b, v8.16b, v9.16b, #4 \n" "ext v11.16b, v8.16b, v9.16b, #8 \n" "fmla v13.4s, v10.4s, %11.s[1] \n" "fmla v14.4s, v11.4s, %11.s[2] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v8.4s, v9.4s}, [%4] \n" // r2 "add %4, %4, #16 \n" "fmla v7.4s, v8.4s, %12.s[0] \n" "ext v10.16b, v8.16b, v9.16b, #4 \n" "ext v11.16b, v8.16b, v9.16b, #8 \n" "fmla v13.4s, v10.4s, %12.s[1] \n" "fmla v14.4s, v11.4s, %12.s[2] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v8.4s, v9.4s}, [%2] \n" // r0 "add %2, %2, #16 \n" "fadd v7.4s, v7.4s, v13.4s \n" "fadd v7.4s, v7.4s, v14.4s \n" "ext v10.16b, v8.16b, v9.16b, #4 \n" "ext v11.16b, v8.16b, v9.16b, #8 \n" "st1 {v7.4s}, [%1], #16 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" "sub %2, %2, #16 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k3456), // %11 "w"(_k6789) // %12 : "cc", "memory", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); } #else if (nn > 0) { asm volatile( "pld [%2, #192] \n" "vld1.f32 {d16-d18}, [%2] \n" // r0 "add %2, #16 \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "0: \n" "pld [%1, #128] \n" "vld1.f32 {d14-d15}, [%1] \n" // _sum "vmla.f32 q7, q8, %e10[0] \n" "vmul.f32 q13, q10, %e10[1] \n" "vmul.f32 q14, q11, %f10[0] \n" "pld [%3, #192] \n" "vld1.f32 {d16-d18}, [%3] \n" // r1 "add %3, #16 \n" "vmla.f32 q7, q8, %e11[0] \n" "vext.32 
q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q13, q10, %e11[1] \n" "vmla.f32 q14, q11, %f11[0] \n" "pld [%4, #192] \n" "vld1.f32 {d16-d18}, [%4] \n" // r2 "add %4, #16 \n" "vmla.f32 q7, q8, %e12[0] \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q13, q10, %e12[1] \n" "vmla.f32 q14, q11, %f12[0] \n" "pld [%2, #192] \n" "vld1.f32 {d16-d18}, [%2] \n" // r0 "add %2, #16 \n" "vadd.f32 q7, q7, q13 \n" "vadd.f32 q7, q7, q14 \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vst1.f32 {d14-d15}, [%1]! \n" "subs %0, #1 \n" "bne 0b \n" "sub %2, #16 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k3456), // %11 "w"(_k6789) // %12 : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _sum = vmulq_f32(_r00, _k0123); _sum = vmlaq_f32(_sum, _r10, _k3456); _sum = vmlaq_f32(_sum, _r20, _k6789); _sum = vsetq_lane_f32(*outptr, _sum, 3); #if __aarch64__ *outptr = vaddvq_f32(_sum); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); _ss = vpadd_f32(_ss, _ss); *outptr = vget_lane_f32(_ss, 0); #endif // __aarch64__ #else float sum = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; *outptr += sum; #endif r0++; r1++; r2++; outptr++; } r0 += 2; r1 += 2; r2 += 2; } kernel0 += 9; } } } static void conv3x3s1_winograd63_transform_kernel_neon(const Mat& kernel, Mat& kernel_tm, int inch, int outch, const Option& opt) { kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, 
{-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // optimized layout for winograd4 // interleave weights int nn_outch = outch >> 2; int remain_outch_start = nn_outch << 2; Mat kernel_tm2(8 * 8 * inch * 4, 1, nn_outch + (outch % 4 + 3) / 4); #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; float* ktm2 = kernel_tm2.channel(pp); const Mat kernel0_tm = kernel_tm.channel(p); const Mat kernel1_tm = kernel_tm.channel(p + 1); const Mat kernel2_tm = kernel_tm.channel(p + 2); const Mat kernel3_tm = kernel_tm.channel(p + 3); int q = 0; #if __ARM_NEON && __aarch64__ for (; q + 3 < inch; q += 4) { const float* k00 = kernel0_tm.row(q); const float* k01 = kernel0_tm.row(q + 1); const float* k02 = kernel0_tm.row(q + 2); const float* k03 = kernel0_tm.row(q + 3); const float* k10 = kernel1_tm.row(q); const float* k11 = kernel1_tm.row(q + 1); const float* k12 = kernel1_tm.row(q + 2); const float* k13 = kernel1_tm.row(q + 3); const float* k20 = kernel2_tm.row(q); const float* 
k21 = kernel2_tm.row(q + 1); const float* k22 = kernel2_tm.row(q + 2); const float* k23 = kernel2_tm.row(q + 3); const float* k30 = kernel3_tm.row(q); const float* k31 = kernel3_tm.row(q + 1); const float* k32 = kernel3_tm.row(q + 2); const float* k33 = kernel3_tm.row(q + 3); for (int r = 0; r < 16; r++) { // split into two asm blocks for gcc reject over 30 oprands :( asm volatile( "ld1 {v0.4s}, [%1], #16 \n" "ld1 {v1.4s}, [%2], #16 \n" "ld1 {v2.4s}, [%3], #16 \n" "ld1 {v3.4s}, [%4], #16 \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "ld1 {v0.4s}, [%5], #16 \n" "ld1 {v1.4s}, [%6], #16 \n" "ld1 {v2.4s}, [%7], #16 \n" "ld1 {v3.4s}, [%8], #16 \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" : "=r"(ktm2), // %0 "=r"(k00), // %1 "=r"(k01), // %2 "=r"(k02), // %3 "=r"(k03), // %4 "=r"(k10), // %5 "=r"(k11), // %6 "=r"(k12), // %7 "=r"(k13) // %8 : "0"(ktm2), "1"(k00), "2"(k01), "3"(k02), "4"(k03), "5"(k10), "6"(k11), "7"(k12), "8"(k13) : "cc", "memory", "v0", "v1", "v2", "v3"); asm volatile( "ld1 {v0.4s}, [%1], #16 \n" "ld1 {v1.4s}, [%2], #16 \n" "ld1 {v2.4s}, [%3], #16 \n" "ld1 {v3.4s}, [%4], #16 \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "ld1 {v0.4s}, [%5], #16 \n" "ld1 {v1.4s}, [%6], #16 \n" "ld1 {v2.4s}, [%7], #16 \n" "ld1 {v3.4s}, [%8], #16 \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" : "=r"(ktm2), // %0 "=r"(k20), // %1 "=r"(k21), // %2 "=r"(k22), // %3 "=r"(k23), // %4 "=r"(k30), // %5 "=r"(k31), // %6 "=r"(k32), // %7 "=r"(k33) // %8 : "0"(ktm2), "1"(k20), "2"(k21), "3"(k22), "4"(k23), "5"(k30), "6"(k31), "7"(k32), "8"(k33) : "cc", "memory", "v0", "v1", "v2", "v3"); } } #endif // __ARM_NEON && __aarch64__ for (; q + 1 < inch; q += 2) { const float* k00 = kernel0_tm.row(q); const float* k01 = kernel0_tm.row(q + 1); const float* k10 = kernel1_tm.row(q); const float* k11 = kernel1_tm.row(q + 1); const float* k20 = kernel2_tm.row(q); const float* k21 = kernel2_tm.row(q + 1); const float* k30 = kernel3_tm.row(q); const float* k31 = 
kernel3_tm.row(q + 1); for (int r = 0; r < 16; r++) { #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%1], #16 \n" "ld1 {v1.4s}, [%2], #16 \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" "ld1 {v0.4s}, [%3], #16 \n" "ld1 {v1.4s}, [%4], #16 \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" "ld1 {v0.4s}, [%5], #16 \n" "ld1 {v1.4s}, [%6], #16 \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" "ld1 {v0.4s}, [%7], #16 \n" "ld1 {v1.4s}, [%8], #16 \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" : "=r"(ktm2), // %0 "=r"(k00), // %1 "=r"(k01), // %2 "=r"(k10), // %3 "=r"(k11), // %4 "=r"(k20), // %5 "=r"(k21), // %6 "=r"(k30), // %7 "=r"(k31) // %8 : "0"(ktm2), "1"(k00), "2"(k01), "3"(k10), "4"(k11), "5"(k20), "6"(k21), "7"(k30), "8"(k31) : "cc", "memory", "v0", "v1"); #else asm volatile( "vld1.f32 {d0-d1}, [%1 :128]! \n" "vld1.f32 {d2-d3}, [%2 :128]! \n" "vst1.f32 {d0-d3}, [%0 :128]! \n" "vld1.f32 {d0-d1}, [%3 :128]! \n" "vld1.f32 {d2-d3}, [%4 :128]! \n" "vst1.f32 {d0-d3}, [%0 :128]! \n" "vld1.f32 {d0-d1}, [%5 :128]! \n" "vld1.f32 {d2-d3}, [%6 :128]! \n" "vst1.f32 {d0-d3}, [%0 :128]! \n" "vld1.f32 {d0-d1}, [%7 :128]! \n" "vld1.f32 {d2-d3}, [%8 :128]! \n" "vst1.f32 {d0-d3}, [%0 :128]! 
\n" : "=r"(ktm2), // %0 "=r"(k00), // %1 "=r"(k01), // %2 "=r"(k10), // %3 "=r"(k11), // %4 "=r"(k20), // %5 "=r"(k21), // %6 "=r"(k30), // %7 "=r"(k31) // %8 : "0"(ktm2), "1"(k00), "2"(k01), "3"(k10), "4"(k11), "5"(k20), "6"(k21), "7"(k30), "8"(k31) : "cc", "memory", "q0", "q1"); #endif // __aarch64__ #else for (int m = 0; m < 4; m++) { ktm2[0 + m] = k00[m]; ktm2[4 + m] = k01[m]; ktm2[8 + m] = k10[m]; ktm2[12 + m] = k11[m]; ktm2[16 + m] = k20[m]; ktm2[20 + m] = k21[m]; ktm2[24 + m] = k30[m]; ktm2[28 + m] = k31[m]; } k00 += 4; k01 += 4; k10 += 4; k11 += 4; k20 += 4; k21 += 4; k30 += 4; k31 += 4; ktm2 += 32; #endif // __ARM_NEON } } for (; q < inch; q++) { const float* k00 = kernel0_tm.row(q); const float* k10 = kernel1_tm.row(q); const float* k20 = kernel2_tm.row(q); const float* k30 = kernel3_tm.row(q); for (int r = 0; r < 16; r++) { #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%1], #16 \n" "ld1 {v1.4s}, [%2], #16 \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" "ld1 {v0.4s}, [%3], #16 \n" "ld1 {v1.4s}, [%4], #16 \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" : "=r"(ktm2), // %0 "=r"(k00), // %1 "=r"(k10), // %2 "=r"(k20), // %3 "=r"(k30) // %4 : "0"(ktm2), "1"(k00), "2"(k10), "3"(k20), "4"(k30) : "cc", "memory", "v0", "v1"); #else asm volatile( "vld1.f32 {d0-d1}, [%1 :128]! \n" "vld1.f32 {d2-d3}, [%2 :128]! \n" "vst1.f32 {d0-d3}, [%0 :128]! \n" "vld1.f32 {d0-d1}, [%3 :128]! \n" "vld1.f32 {d2-d3}, [%4 :128]! \n" "vst1.f32 {d0-d3}, [%0 :128]! 
\n" : "=r"(ktm2), // %0 "=r"(k00), // %1 "=r"(k10), // %2 "=r"(k20), // %3 "=r"(k30) // %4 : "0"(ktm2), "1"(k00), "2"(k10), "3"(k20), "4"(k30) : "cc", "memory", "q0", "q1"); #endif // __aarch64__ #else for (int m = 0; m < 4; m++) { ktm2[0 + m] = k00[m]; ktm2[4 + m] = k10[m]; ktm2[8 + m] = k20[m]; ktm2[12 + m] = k30[m]; } k00 += 4; k10 += 4; k20 += 4; k30 += 4; ktm2 += 16; #endif // __ARM_NEON } } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { float* ktm2 = (float*)kernel_tm2.channel(nn_outch) + 8 * 8 * inch * (p - remain_outch_start); const Mat kernel0_tm = kernel_tm.channel(p); int q = 0; for (; q < inch; q++) { const float* k00 = kernel0_tm.row(q); for (int r = 0; r < 16; r++) { #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%1], #16 \n" "st1 {v0.4s}, [%0], #16 \n" : "=r"(ktm2), // %0 "=r"(k00) // %1 : "0"(ktm2), "1"(k00) : "cc", "memory", "v0"); #else asm volatile( "vld1.f32 {d0-d1}, [%1 :128]! \n" "vst1.f32 {d0-d1}, [%0 :128]! 
\n"
                : "=r"(ktm2), // %0
                "=r"(k00)     // %1
                : "0"(ktm2),
                "1"(k00)
                : "cc", "memory", "q0");
#endif // __aarch64__
#else
                // scalar fallback: copy one 4-float group per iteration
                for (int m = 0; m < 4; m++)
                {
                    ktm2[m] = k00[m];
                }

                k00 += 4;
                ktm2 += 4;
#endif // __ARM_NEON
            }
        }
    }

    kernel_tm = kernel_tm2;
}

// Kernel transform for Winograd F(6x6, 3x3) convolution.
// Each 3x3 kernel g is expanded to an 8x8 tile U = G * g * G^T, where G is
// the 8x3 matrix `ktm` below (first pass over rows, second pass over
// columns). The 8x8 tiles are then re-interleaved into kernel_tm2 in
// groups of 8 / 4 / 1 output channels — presumably the layout expected by
// the matching conv3x3s1_winograd63 compute routine (verify against caller).
static void conv3x3s1_winograd63_transform_kernel_neon5(const Mat& kernel, Mat& kernel_tm, int inch, int outch, const Option& opt)
{
    // one 8x8 transformed tile per (outch, inch) kernel
    kernel_tm.create(8 * 8, inch, outch);

    // G matrix (8x3) for F(6,3); rows are the transform coefficients
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            // raw 3x3 kernel for output channel p, input channel q
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : tmp = G * g  (8x3 intermediate)
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v : U = tmp * G^T  (8x8 result, stored row-major)
            for (int j = 0; j < 8; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // optimized layout for winograd5
    // interleave weights
    // Mat kernel_tm2(8*8, inch, outch);
    // Mat kernel_tm2(inch, 64, outch);
#if __ARM_NEON && __aarch64__
    // one interleaved channel per group of 8 outch, plus 4-wide and 1-wide remainders
    Mat kernel_tm2(8 * 4 * (inch / 4) + 8 * (inch % 4), 64, outch / 8 + (outch % 8) / 4 + outch % 4);
#else
    Mat kernel_tm2(4 * 4 * (inch / 4) + 4 * (inch % 4), 64, outch / 4 + outch % 4);
#endif

    int p = 0;
#if __aarch64__
    // interleave 8 output channels at a time
    for (; p + 7 < outch; p += 8)
    {
        const Mat kernel0_tm = kernel_tm.channel(p);
        const Mat kernel1_tm =
kernel_tm.channel(p + 1);
        const Mat kernel2_tm = kernel_tm.channel(p + 2);
        const Mat kernel3_tm = kernel_tm.channel(p + 3);
        const Mat kernel4_tm = kernel_tm.channel(p + 4);
        const Mat kernel5_tm = kernel_tm.channel(p + 5);
        const Mat kernel6_tm = kernel_tm.channel(p + 6);
        const Mat kernel7_tm = kernel_tm.channel(p + 7);

        // destination: one interleaved channel covers this group of 8 outch
        Mat ktm2 = kernel_tm2.channel(p / 8);

        // for each of the 64 transform positions r, gather element r of all
        // 8 channels across every input channel q, packed 8-contiguous
        for (int r = 0; r < 64; r++)
        {
            float* ktm2p = ktm2.row(r);

            for (int q = 0; q < inch; q++)
            {
                const float* ktm0_0 = kernel0_tm.row(q);
                const float* ktm1_0 = kernel1_tm.row(q);
                const float* ktm2_0 = kernel2_tm.row(q);
                const float* ktm3_0 = kernel3_tm.row(q);
                const float* ktm4_0 = kernel4_tm.row(q);
                const float* ktm5_0 = kernel5_tm.row(q);
                const float* ktm6_0 = kernel6_tm.row(q);
                const float* ktm7_0 = kernel7_tm.row(q);

                ktm2p[0] = ktm0_0[r];
                ktm2p[1] = ktm1_0[r];
                ktm2p[2] = ktm2_0[r];
                ktm2p[3] = ktm3_0[r];
                ktm2p[4] = ktm4_0[r];
                ktm2p[5] = ktm5_0[r];
                ktm2p[6] = ktm6_0[r];
                ktm2p[7] = ktm7_0[r];

                ktm2p += 8;
            }
        }
    }
#endif // __aarch64__

    // interleave 4 output channels at a time
    for (; p + 3 < outch; p += 4)
    {
        const Mat kernel0_tm = kernel_tm.channel(p);
        const Mat kernel1_tm = kernel_tm.channel(p + 1);
        const Mat kernel2_tm = kernel_tm.channel(p + 2);
        const Mat kernel3_tm = kernel_tm.channel(p + 3);

#if __ARM_NEON && __aarch64__
        // skip past the 8-wide groups already written above
        Mat ktm2 = kernel_tm2.channel(p / 8 + (p % 8) / 4);
#else
        Mat ktm2 = kernel_tm2.channel(p / 4);
#endif

        // same gather as the 8-wide case, packed 4-contiguous
        for (int r = 0; r < 64; r++)
        {
            float* ktm2p = ktm2.row(r);

            for (int q = 0; q < inch; q++)
            {
                const float* ktm0_0 = kernel0_tm.row(q);
                const float* ktm1_0 = kernel1_tm.row(q);
                const float* ktm2_0 = kernel2_tm.row(q);
                const float* ktm3_0 = kernel3_tm.row(q);

                ktm2p[0] = ktm0_0[r];
                ktm2p[1] = ktm1_0[r];
                ktm2p[2] = ktm2_0[r];
                ktm2p[3] = ktm3_0[r];

                ktm2p += 4;
            }
        }
    }

    // remaining output channels, one at a time
    for (; p < outch; p++)
    {
        const Mat kernel0_tm = kernel_tm.channel(p);

#if __ARM_NEON && __aarch64__
        // skip past 8-wide and 4-wide groups already written
        Mat ktm2 = kernel_tm2.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
        Mat ktm2 = kernel_tm2.channel(p / 4 + p % 4);
#endif

        for (int r = 0; r < 64; r++)
        {
            float* ktm2p = ktm2.row(r);

            for (int
q = 0; q < inch; q++)
            {
                const float* ktm0_0 = kernel0_tm.row(q);

                ktm2p[0] = ktm0_0[r];

                ktm2p += 1;
            }
        }
    }

    // hand the interleaved layout back to the caller
    kernel_tm = kernel_tm2;
}

// Winograd F(6x6, 3x3) convolution, stride 1.
// Input is padded so the output is tiled by 6x6 blocks; each block is
// computed from an overlapping 8x8 input tile transformed with B^T d B
// (the commented `itm` matrix below). Accumulation against the transformed
// kernels and the output transform follow later in this function.
// NOTE(review): `bias` is captured here but applied further down — not
// visible in this chunk.
static void conv3x3s1_winograd63_neon4(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 6n+2: round output up to a multiple of 6, input needs 2 extra
    // columns/rows so that 8x8 input tiles (stride 6) fit
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 5) / 6 * 6;
    outh = (outh + 5) / 6 * 6;

    w = outw + 2;
    h = outh + 2;
    Option opt_b = opt;
    opt_b.blob_allocator = opt.workspace_allocator;
    // pad bottom/right only with zeros
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);

    const float* bias = _bias;

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        // 8 transformed values per 6 output pixels in each dimension
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;

        bottom_blob_tm.create(4, 16 * w_tm / 8 * h_tm / 8, inch, 4u, opt.workspace_allocator);
        const int tiles = w_tm / 8 * h_tm / 8;

        // input transform matrix B^T for F(6,3):
        // const float itm[8][8] = {
        //     {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
        //
        //     {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
        //     {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
        //
        //     {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
        //     {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
        //
        //     {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
        //     {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
        //
        //     {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
        // };

        // factored form of the rows above, with shared subexpressions:
        // 0 = r00 - r06 + (r04 - r02) * 5.25
        // 7 = r07 - r01 + (r03 - r05) * 5.25
        // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
        // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
        // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
        // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
        // reuse r04 * 1.25
        // reuse r03 * 2.5
        // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
        // 6 = (r06 + (r02 - r04 * 1.25) * 4) -
(r01 * 2 - r03 * 2.5 + r05 * 0.5) #if __ARM_NEON const float coeff[8] = { 0.25f, 0.5f, -1.25f, 2.f, -2.5f, 4.f, 4.25f, 5.25f }; float32x4_t _coeff0 = vld1q_f32(coeff); float32x4_t _coeff1 = vld1q_f32(coeff + 4); #endif // __ARM_NEON #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8]; // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { #if __ARM_NEON const float* r0 = img0.row(i * 6) + j * 6; const float* r1 = r0 + w; const float* r2 = r0 + w * 2; const float* r3 = r0 + w * 3; // the assembly block for armv7 input transform requires 13 general registers // old gcc may fail to allocate register on debug build without -fomit-frame-pointer // so, fallback to intrinsic version for armv7 debug build --- nihui #if __aarch64__ || !defined(NDEBUG) for (int m = 0; m + 3 < 8; m += 4) { float32x4_t _r0_0123 = vld1q_f32(r0); float32x4_t _r0_4567 = vld1q_f32(r0 + 4); float32x4_t _r1_0123 = vld1q_f32(r1); float32x4_t _r1_4567 = vld1q_f32(r1 + 4); float32x4_t _r2_0123 = vld1q_f32(r2); float32x4_t _r2_4567 = vld1q_f32(r2 + 4); float32x4_t _r3_0123 = vld1q_f32(r3); float32x4_t _r3_4567 = vld1q_f32(r3 + 4); float32x4x2_t _r01_00221133 = vtrnq_f32(_r0_0123, _r1_0123); float32x4x2_t _r01_44665577 = vtrnq_f32(_r0_4567, _r1_4567); float32x4x2_t _r23_00221133 = vtrnq_f32(_r2_0123, _r3_0123); float32x4x2_t _r23_44665577 = vtrnq_f32(_r2_4567, _r3_4567); // no vswp intrinsic :( float32x4_t _r_00 = vcombine_f32(vget_low_f32(_r01_00221133.val[0]), vget_low_f32(_r23_00221133.val[0])); float32x4_t _r_11 = vcombine_f32(vget_low_f32(_r01_00221133.val[1]), vget_low_f32(_r23_00221133.val[1])); float32x4_t _r_22 = vcombine_f32(vget_high_f32(_r01_00221133.val[0]), vget_high_f32(_r23_00221133.val[0])); float32x4_t _r_33 = vcombine_f32(vget_high_f32(_r01_00221133.val[1]), vget_high_f32(_r23_00221133.val[1])); float32x4_t _r_44 = 
vcombine_f32(vget_low_f32(_r01_44665577.val[0]), vget_low_f32(_r23_44665577.val[0])); float32x4_t _r_55 = vcombine_f32(vget_low_f32(_r01_44665577.val[1]), vget_low_f32(_r23_44665577.val[1])); float32x4_t _r_66 = vcombine_f32(vget_high_f32(_r01_44665577.val[0]), vget_high_f32(_r23_44665577.val[0])); float32x4_t _r_77 = vcombine_f32(vget_high_f32(_r01_44665577.val[1]), vget_high_f32(_r23_44665577.val[1])); float32x4_t _r_0_m_6 = vsubq_f32(_r_00, _r_66); float32x4_t _r_7_m_1 = vsubq_f32(_r_77, _r_11); float32x4_t _r_4_m_2 = vsubq_f32(_r_44, _r_22); float32x4_t _r_3_m_5 = vsubq_f32(_r_33, _r_55); float32x4_t _tmp0 = vmlaq_lane_f32(_r_0_m_6, _r_4_m_2, vget_high_f32(_coeff1), 1); float32x4_t _tmp7 = vmlaq_lane_f32(_r_7_m_1, _r_3_m_5, vget_high_f32(_coeff1), 1); vst1q_f32(&tmp[0][m], _tmp0); vst1q_f32(&tmp[7][m], _tmp7); float32x4_t _r_2_a_6 = vaddq_f32(_r_22, _r_66); float32x4_t _r_1_a_5 = vaddq_f32(_r_11, _r_55); float32x4_t _tmp12a = vmlsq_lane_f32(_r_2_a_6, _r_44, vget_high_f32(_coeff1), 0); float32x4_t _tmp12b = vmlsq_lane_f32(_r_1_a_5, _r_33, vget_high_f32(_coeff1), 0); float32x4_t _tmp1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _tmp2 = vsubq_f32(_tmp12a, _tmp12b); vst1q_f32(&tmp[1][m], _tmp1); vst1q_f32(&tmp[2][m], _tmp2); float32x4_t _r_4_x_c = vmulq_lane_f32(_r_44, vget_high_f32(_coeff0), 0); float32x4_t _r_3_x_c = vmulq_lane_f32(_r_33, vget_low_f32(_coeff1), 0); float32x4_t _tmp34a = vaddq_f32(_r_66, _r_4_x_c); _tmp34a = vmlaq_lane_f32(_tmp34a, _r_22, vget_low_f32(_coeff0), 0); float32x4_t _tmp34b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_low_f32(_coeff0), 1); _tmp34b = vmlaq_lane_f32(_tmp34b, _r_55, vget_high_f32(_coeff0), 1); float32x4_t _tmp3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _tmp4 = vsubq_f32(_tmp34a, _tmp34b); vst1q_f32(&tmp[3][m], _tmp3); vst1q_f32(&tmp[4][m], _tmp4); // reuse r04 * 1.25 // reuse r03 * 2.5 float32x4_t _r_2_a_4c = vaddq_f32(_r_22, _r_4_x_c); float32x4_t _tmp56a = vmlaq_lane_f32(_r_66, _r_2_a_4c, vget_low_f32(_coeff1), 1); float32x4_t 
_tmp56b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_high_f32(_coeff0), 1); _tmp56b = vmlaq_lane_f32(_tmp56b, _r_55, vget_low_f32(_coeff0), 1); float32x4_t _tmp5 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _tmp6 = vsubq_f32(_tmp56a, _tmp56b); vst1q_f32(&tmp[5][m], _tmp5); vst1q_f32(&tmp[6][m], _tmp6); r0 += w * 4; r1 += w * 4; r2 += w * 4; r3 += w * 4; } const float* t0 = tmp[0]; const float* t1 = tmp[1]; const float* t2 = tmp[2]; const float* t3 = tmp[3]; float* r0_tm0_0 = img0_tm.row(i * w_tm / 8 + j); float* r0_tm0_4 = img0_tm.row(i * w_tm / 8 + j + tiles); float* r0_tm1_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 2); float* r0_tm1_4 = img0_tm.row(i * w_tm / 8 + j + tiles * 3); float* r0_tm2_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 4); float* r0_tm2_4 = img0_tm.row(i * w_tm / 8 + j + tiles * 5); float* r0_tm3_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 6); float* r0_tm3_4 = img0_tm.row(i * w_tm / 8 + j + tiles * 7); for (int m = 0; m + 3 < 8; m += 4) { float32x4_t _t0_0123 = vld1q_f32(t0); float32x4_t _t0_4567 = vld1q_f32(t0 + 4); float32x4_t _t1_0123 = vld1q_f32(t1); float32x4_t _t1_4567 = vld1q_f32(t1 + 4); float32x4_t _t2_0123 = vld1q_f32(t2); float32x4_t _t2_4567 = vld1q_f32(t2 + 4); float32x4_t _t3_0123 = vld1q_f32(t3); float32x4_t _t3_4567 = vld1q_f32(t3 + 4); float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123); float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567); float32x4x2_t _t23_00221133 = vtrnq_f32(_t2_0123, _t3_0123); float32x4x2_t _t23_44665577 = vtrnq_f32(_t2_4567, _t3_4567); // no vswp intrinsic :( float32x4_t _t_00 = vcombine_f32(vget_low_f32(_t01_00221133.val[0]), vget_low_f32(_t23_00221133.val[0])); float32x4_t _t_11 = vcombine_f32(vget_low_f32(_t01_00221133.val[1]), vget_low_f32(_t23_00221133.val[1])); float32x4_t _t_22 = vcombine_f32(vget_high_f32(_t01_00221133.val[0]), vget_high_f32(_t23_00221133.val[0])); float32x4_t _t_33 = vcombine_f32(vget_high_f32(_t01_00221133.val[1]), vget_high_f32(_t23_00221133.val[1])); float32x4_t _t_44 = 
vcombine_f32(vget_low_f32(_t01_44665577.val[0]), vget_low_f32(_t23_44665577.val[0])); float32x4_t _t_55 = vcombine_f32(vget_low_f32(_t01_44665577.val[1]), vget_low_f32(_t23_44665577.val[1])); float32x4_t _t_66 = vcombine_f32(vget_high_f32(_t01_44665577.val[0]), vget_high_f32(_t23_44665577.val[0])); float32x4_t _t_77 = vcombine_f32(vget_high_f32(_t01_44665577.val[1]), vget_high_f32(_t23_44665577.val[1])); float32x4_t _t_0_m_6 = vsubq_f32(_t_00, _t_66); float32x4_t _t_7_m_1 = vsubq_f32(_t_77, _t_11); float32x4_t _t_4_m_2 = vsubq_f32(_t_44, _t_22); float32x4_t _t_3_m_5 = vsubq_f32(_t_33, _t_55); float32x4_t _r0_tm_0_0 = vmlaq_lane_f32(_t_0_m_6, _t_4_m_2, vget_high_f32(_coeff1), 1); float32x4_t _r0_tm_4_3 = vmlaq_lane_f32(_t_7_m_1, _t_3_m_5, vget_high_f32(_coeff1), 1); r0_tm0_0[0] = vgetq_lane_f32(_r0_tm_0_0, 0); r0_tm1_0[0] = vgetq_lane_f32(_r0_tm_0_0, 1); r0_tm2_0[0] = vgetq_lane_f32(_r0_tm_0_0, 2); r0_tm3_0[0] = vgetq_lane_f32(_r0_tm_0_0, 3); r0_tm0_4[3] = vgetq_lane_f32(_r0_tm_4_3, 0); r0_tm1_4[3] = vgetq_lane_f32(_r0_tm_4_3, 1); r0_tm2_4[3] = vgetq_lane_f32(_r0_tm_4_3, 2); r0_tm3_4[3] = vgetq_lane_f32(_r0_tm_4_3, 3); float32x4_t _t_2_m_6 = vaddq_f32(_t_22, _t_66); float32x4_t _t_1_m_5 = vaddq_f32(_t_11, _t_55); float32x4_t _tmp12a = vmlsq_lane_f32(_t_2_m_6, _t_44, vget_high_f32(_coeff1), 0); float32x4_t _tmp12b = vmlsq_lane_f32(_t_1_m_5, _t_33, vget_high_f32(_coeff1), 0); float32x4_t _r0_tm_0_1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _r0_tm_0_2 = vsubq_f32(_tmp12a, _tmp12b); r0_tm0_0[1] = vgetq_lane_f32(_r0_tm_0_1, 0); r0_tm1_0[1] = vgetq_lane_f32(_r0_tm_0_1, 1); r0_tm2_0[1] = vgetq_lane_f32(_r0_tm_0_1, 2); r0_tm3_0[1] = vgetq_lane_f32(_r0_tm_0_1, 3); r0_tm0_0[2] = vgetq_lane_f32(_r0_tm_0_2, 0); r0_tm1_0[2] = vgetq_lane_f32(_r0_tm_0_2, 1); r0_tm2_0[2] = vgetq_lane_f32(_r0_tm_0_2, 2); r0_tm3_0[2] = vgetq_lane_f32(_r0_tm_0_2, 3); float32x4_t _t_4_x_c = vmulq_lane_f32(_t_44, vget_high_f32(_coeff0), 0); float32x4_t _t_3_x_c = vmulq_lane_f32(_t_33, 
vget_low_f32(_coeff1), 0); float32x4_t _tmp34a = vaddq_f32(_t_66, _t_4_x_c); _tmp34a = vmlaq_lane_f32(_tmp34a, _t_22, vget_low_f32(_coeff0), 0); float32x4_t _tmp34b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_low_f32(_coeff0), 1); _tmp34b = vmlaq_lane_f32(_tmp34b, _t_55, vget_high_f32(_coeff0), 1); float32x4_t _r0_tm_0_3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _r0_tm_4_0 = vsubq_f32(_tmp34a, _tmp34b); r0_tm0_0[3] = vgetq_lane_f32(_r0_tm_0_3, 0); r0_tm1_0[3] = vgetq_lane_f32(_r0_tm_0_3, 1); r0_tm2_0[3] = vgetq_lane_f32(_r0_tm_0_3, 2); r0_tm3_0[3] = vgetq_lane_f32(_r0_tm_0_3, 3); r0_tm0_4[0] = vgetq_lane_f32(_r0_tm_4_0, 0); r0_tm1_4[0] = vgetq_lane_f32(_r0_tm_4_0, 1); r0_tm2_4[0] = vgetq_lane_f32(_r0_tm_4_0, 2); r0_tm3_4[0] = vgetq_lane_f32(_r0_tm_4_0, 3); float32x4_t _t_2_a_4c = vaddq_f32(_t_22, _t_4_x_c); float32x4_t _tmp56a = vmlaq_lane_f32(_t_66, _t_2_a_4c, vget_low_f32(_coeff1), 1); float32x4_t _tmp56b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_high_f32(_coeff0), 1); _tmp56b = vmlaq_lane_f32(_tmp56b, _t_55, vget_low_f32(_coeff0), 1); float32x4_t _r0_tm_4_1 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _r0_tm_4_2 = vsubq_f32(_tmp56a, _tmp56b); r0_tm0_4[1] = vgetq_lane_f32(_r0_tm_4_1, 0); r0_tm1_4[1] = vgetq_lane_f32(_r0_tm_4_1, 1); r0_tm2_4[1] = vgetq_lane_f32(_r0_tm_4_1, 2); r0_tm3_4[1] = vgetq_lane_f32(_r0_tm_4_1, 3); r0_tm0_4[2] = vgetq_lane_f32(_r0_tm_4_2, 0); r0_tm1_4[2] = vgetq_lane_f32(_r0_tm_4_2, 1); r0_tm2_4[2] = vgetq_lane_f32(_r0_tm_4_2, 2); r0_tm3_4[2] = vgetq_lane_f32(_r0_tm_4_2, 3); t0 += 8 * 4; t1 += 8 * 4; t2 += 8 * 4; t3 += 8 * 4; r0_tm0_0 += img0_tm.w * tiles * 2 * 4; r0_tm0_4 += img0_tm.w * tiles * 2 * 4; r0_tm1_0 += img0_tm.w * tiles * 2 * 4; r0_tm1_4 += img0_tm.w * tiles * 2 * 4; r0_tm2_0 += img0_tm.w * tiles * 2 * 4; r0_tm2_4 += img0_tm.w * tiles * 2 * 4; r0_tm3_0 += img0_tm.w * tiles * 2 * 4; r0_tm3_4 += img0_tm.w * tiles * 2 * 4; } #else // __aarch64__ float* t0 = tmp[0]; float* t1 = tmp[1]; float* t2 = tmp[2]; float* t3 = tmp[3]; float* t4 = 
tmp[4]; float* t5 = tmp[5]; float* t6 = tmp[6]; float* t7 = tmp[7]; int stepw = w * 4 * 4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%8], %26 \n" "vld1.f32 {d20-d23}, [%9], %26 \n" "vld1.f32 {d24-d27}, [%10], %26 \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11], %26 \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n" // q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n" // q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4-d5}, [%0]! \n" // tmp[0][m] "vmov q3, q7 \n" // use q7 "vadd.f32 q2, q13, q6 \n" // use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n" // use q7 "vadd.f32 q6, q12, q6 \n" // use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16-d17}, [%1]! \n" // tmp[1][m] "vmla.f32 q4, q6, %e25[1] \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18-d19}, [%2]! \n" // tmp[2][m] "vadd.f32 q8, q2, q3 \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16-d17}, [%3]! \n" // tmp[3][m] "vst1.f32 {d18-d19}, [%4]! \n" // tmp[4][m] "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d4-d5}, [%5]! \n" // tmp[5][m] "vst1.f32 {d6-d7}, [%6]! \n" // tmp[6][m] "vst1.f32 {d12-d13}, [%7]! 
\n" // tmp[7][m] // loop1 "vld1.f32 {d16-d19}, [%8] \n" "vld1.f32 {d20-d23}, [%9] \n" "vld1.f32 {d24-d27}, [%10] \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11] \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n" // q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n" // q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4-d5}, [%0]! \n" // tmp[0][m] "vmov q3, q7 \n" // use q7 "vadd.f32 q2, q13, q6 \n" // use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n" // use q7 "vadd.f32 q6, q12, q6 \n" // use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16-d17}, [%1]! \n" // tmp[1][m] "vmla.f32 q4, q6, %e25[1] \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18-d19}, [%2]! \n" // tmp[2][m] "vadd.f32 q8, q2, q3 \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16-d17}, [%3]! \n" // tmp[3][m] "vst1.f32 {d18-d19}, [%4]! \n" // tmp[4][m] "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d4-d5}, [%5]! \n" // tmp[5][m] "vst1.f32 {d6-d7}, [%6]! \n" // tmp[6][m] "vst1.f32 {d12-d13}, [%7]! 
\n" // tmp[7][m] : "=r"(t0), // %0 "=r"(t1), // %1 "=r"(t2), // %2 "=r"(t3), // %3 "=r"(t4), // %4 "=r"(t5), // %5 "=r"(t6), // %6 "=r"(t7), // %7 "=r"(r0), // %8 "=r"(r1), // %9 "=r"(r2), // %10 "=r"(r3) // %11 : "0"(t0), "1"(t1), "2"(t2), "3"(t3), "4"(t4), "5"(t5), "6"(t6), "7"(t7), "8"(r0), "9"(r1), "10"(r2), "11"(r3), "w"(_coeff0), // %24 "w"(_coeff1), // %25 "r"(stepw) // %26 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); t0 = tmp[0]; t1 = tmp[1]; t2 = tmp[2]; t3 = tmp[3]; float* r0_tm0_0 = img0_tm.row(i * w_tm / 8 + j); float* r0_tm0_4 = img0_tm.row(i * w_tm / 8 + j + tiles); float* r0_tm1_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 2); float* r0_tm1_4 = img0_tm.row(i * w_tm / 8 + j + tiles * 3); float* r0_tm2_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 4); float* r0_tm2_4 = img0_tm.row(i * w_tm / 8 + j + tiles * 5); float* r0_tm3_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 6); float* r0_tm3_4 = img0_tm.row(i * w_tm / 8 + j + tiles * 7); int step = img0_tm.w * tiles * 2 * 4 * 4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%8] \n" "add %8, %8, #128 \n" "vld1.f32 {d20-d23}, [%9] \n" "add %9, %9, #128 \n" "vld1.f32 {d24-d27}, [%10] \n" "add %10, %10, #128 \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11] \n" "add %11, %11, #128 \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n" // q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n" // q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4[0]}, [%0]! \n" "vst1.f32 {d4[1]}, [%2]! \n" "vmov q3, q7 \n" // use q7 "vst1.f32 {d5[0]}, [%4]! \n" "vst1.f32 {d5[1]}, [%6]! 
\n" "vadd.f32 q2, q13, q6 \n" // use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n" // use q7 "vadd.f32 q6, q12, q6 \n" // use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16[0]}, [%0]! \n" "vst1.f32 {d16[1]}, [%2]! \n" "vmla.f32 q4, q6, %e25[1] \n" "vst1.f32 {d17[0]}, [%4]! \n" "vst1.f32 {d17[1]}, [%6]! \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18[0]}, [%0]! \n" "vst1.f32 {d18[1]}, [%2]! \n" "vadd.f32 q8, q2, q3 \n" "vst1.f32 {d19[0]}, [%4]! \n" "vst1.f32 {d19[1]}, [%6]! \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16[0]}, [%0], %26 \n" "vst1.f32 {d16[1]}, [%2], %26 \n" "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d17[0]}, [%4], %26 \n" "vst1.f32 {d17[1]}, [%6], %26 \n" "vtrn.32 q9, q2 \n" "vtrn.32 q3, q6 \n" "sub %0, %0, #12 \n" "sub %2, %2, #12 \n" "sub %4, %4, #12 \n" "sub %6, %6, #12 \n" "vswp d19, d6 \n" "vswp d5, d12 \n" "vst1.f32 {d18-d19}, [%1], %26 \n" "vst1.f32 {d4-d5}, [%3], %26 \n" "vst1.f32 {d6-d7}, [%5], %26 \n" "vst1.f32 {d12-d13}, [%7], %26 \n" // loop1 "vld1.f32 {d16-d19}, [%8] \n" "vld1.f32 {d20-d23}, [%9] \n" "vld1.f32 {d24-d27}, [%10] \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11] \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n" // q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n" // q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4[0]}, [%0]! \n" "vst1.f32 {d4[1]}, [%2]! \n" "vmov q3, q7 \n" // use q7 "vst1.f32 {d5[0]}, [%4]! 
\n" "vst1.f32 {d5[1]}, [%6]! \n" "vadd.f32 q2, q13, q6 \n" // use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n" // use q7 "vadd.f32 q6, q12, q6 \n" // use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16[0]}, [%0]! \n" "vst1.f32 {d16[1]}, [%2]! \n" "vmla.f32 q4, q6, %e25[1] \n" "vst1.f32 {d17[0]}, [%4]! \n" "vst1.f32 {d17[1]}, [%6]! \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18[0]}, [%0]! \n" "vst1.f32 {d18[1]}, [%2]! \n" "vadd.f32 q8, q2, q3 \n" "vst1.f32 {d19[0]}, [%4]! \n" "vst1.f32 {d19[1]}, [%6]! \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16[0]}, [%0] \n" "vst1.f32 {d16[1]}, [%2] \n" "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d17[0]}, [%4] \n" "vst1.f32 {d17[1]}, [%6] \n" "vtrn.32 q9, q2 \n" "vtrn.32 q3, q6 \n" "vswp d19, d6 \n" "vswp d5, d12 \n" "vst1.f32 {d18-d19}, [%1] \n" "vst1.f32 {d4-d5}, [%3] \n" "vst1.f32 {d6-d7}, [%5] \n" "vst1.f32 {d12-d13}, [%7] \n" : "=r"(r0_tm0_0), // %0 "=r"(r0_tm0_4), // %1 "=r"(r0_tm1_0), // %2 "=r"(r0_tm1_4), // %3 "=r"(r0_tm2_0), // %4 "=r"(r0_tm2_4), // %5 "=r"(r0_tm3_0), // %6 "=r"(r0_tm3_4), // %7 "=r"(t0), // %8 "=r"(t1), // %9 "=r"(t2), // %10 "=r"(t3) // %11 : "0"(r0_tm0_0), "1"(r0_tm0_4), "2"(r0_tm1_0), "3"(r0_tm1_4), "4"(r0_tm2_0), "5"(r0_tm2_4), "6"(r0_tm3_0), "7"(r0_tm3_4), "8"(t0), "9"(t1), "10"(t2), "11"(t3), "w"(_coeff0), // %24 "w"(_coeff1), // %25 "r"(step) // %26 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ #else const float* r0 = img0.row(i * 6) + j * 6; for (int m = 0; m < 8; m++) { tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25f; tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25f; float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25f); float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25f); tmp[1][m] = 
tmp12a + tmp12b; tmp[2][m] = tmp12a - tmp12b; float tmp34a = (r0[6] + r0[2] * 0.25f - r0[4] * 1.25f); float tmp34b = (r0[1] * 0.5f - r0[3] * 2.5f + r0[5] * 2.f); tmp[3][m] = tmp34a + tmp34b; tmp[4][m] = tmp34a - tmp34b; float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25f) * 4.f); float tmp56b = (r0[1] * 2.f - r0[3] * 2.5f + r0[5] * 0.5f); tmp[5][m] = tmp56a + tmp56b; tmp[6][m] = tmp56a - tmp56b; r0 += w; } float* r0_tm_0 = img0_tm.row(i * w_tm / 8 + j); float* r0_tm_4 = img0_tm.row(i * w_tm / 8 + j + tiles); for (int m = 0; m < 8; m++) { const float* tmp0 = tmp[m]; r0_tm_0[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25f; r0_tm_4[3] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25f; float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25f); float tmp12b = (tmp0[1] - tmp0[3] * 4.25f + tmp0[5]); r0_tm_0[1] = tmp12a + tmp12b; r0_tm_0[2] = tmp12a - tmp12b; float tmp34a = (tmp0[6] + tmp0[2] * 0.25f - tmp0[4] * 1.25f); float tmp34b = (tmp0[1] * 0.5f - tmp0[3] * 2.5f + tmp0[5] * 2.f); r0_tm_0[3] = tmp34a + tmp34b; r0_tm_4[0] = tmp34a - tmp34b; float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25f) * 4.f); float tmp56b = (tmp0[1] * 2.f - tmp0[3] * 2.5f + tmp0[5] * 0.5f); r0_tm_4[1] = tmp56a + tmp56b; r0_tm_4[2] = tmp56a - tmp56b; r0_tm_0 += img0_tm.w * tiles * 2; r0_tm_4 += img0_tm.w * tiles * 2; } #endif // __ARM_NEON } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; top_blob_tm.create(4, 16 * w_tm / 8 * h_tm / 8, outch, 4u, opt.workspace_allocator); const int tiles = h_tm / 8 * w_tm / 8; int nn_outch = outch >> 2; int remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; Mat out0_tm = top_blob_tm.channel(p); Mat out1_tm = top_blob_tm.channel(p + 1); Mat out2_tm = top_blob_tm.channel(p + 2); Mat out3_tm = top_blob_tm.channel(p + 3); const float* ktm = kernel_tm.channel(pp); out0_tm.fill(0.f); 
out1_tm.fill(0.f); out2_tm.fill(0.f); out3_tm.fill(0.f); int q = 0; #if __ARM_NEON && __aarch64__ for (; q + 3 < inch; q += 4) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = bottom_blob_tm.channel(q + 1); const float* r2 = bottom_blob_tm.channel(q + 2); const float* r3 = bottom_blob_tm.channel(q + 3); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; asm volatile( "mov w0, #16 \n" // w0 = r = 16 "0: \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%8], #64 \n" // v0 v1 v2 v3 = _k00 _k01 _k02 _k03 "prfm pldl1keep, [%8, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%8], #64 \n" // v4 v5 v6 v7 = _k10 _k11 _k12 _k13 "prfm pldl1keep, [%8, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n" // v8 v9 v10 v11 = _k20 _k21 _k22 _k23 "prfm pldl1keep, [%8, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n" // v12 v13 v14 v15 = _k30 _k31 _k32 _k33 // tile loop "lsr w1, %w18, #2 \n" // w1 = nn = tiles >> 2 "cmp w1, #0 \n" "beq 2f \n" //BEGIN tile loop "prfm pldl1keep, [%4, #128] \n" // "ld1 {v16.4s}, [%4], #16 \n" "1: \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v20.4s}, [%0] \n" "add x4, %0, #16 \n" // x4 = %0 next "fmla v20.4s, v16.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v21.4s}, [%1] \n" "add x5, %1, #16 \n" // x5 = %1 next "fmla v21.4s, v16.4s, v4.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v22.4s}, [%2] \n" "add x6, %2, #16 \n" // x6 = %2 next "fmla v22.4s, v16.4s, v8.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v23.4s}, [%3] \n" "add x7, %3, #16 \n" // x7 = %3 next "prfm pldl1keep, [%5, #128] \n" "ld1 {v17.4s}, [%5], #16 \n" "fmla v23.4s, v16.4s, v12.4s \n" "prfm pldl1keep, [x4, #128] \n" "ld1 {v24.4s}, [x4] \n" "fmla v20.4s, v17.4s, v1.4s \n" "fmla v21.4s, v17.4s, v5.4s \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v18.4s}, [%6], #16 \n" "fmla v22.4s, v17.4s, v9.4s \n" "fmla v23.4s, v17.4s, v13.4s \n" "prfm pldl1keep, [x5, #128] \n" "ld1 
{v25.4s}, [x5] \n" "fmla v20.4s, v18.4s, v2.4s \n" "fmla v21.4s, v18.4s, v6.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v19.4s}, [%7], #16 \n" "fmla v22.4s, v18.4s, v10.4s \n" "fmla v23.4s, v18.4s, v14.4s \n" "prfm pldl1keep, [x6, #128] \n" "ld1 {v26.4s}, [x6] \n" "fmla v20.4s, v19.4s, v3.4s \n" "fmla v21.4s, v19.4s, v7.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "fmla v22.4s, v19.4s, v11.4s \n" "fmla v23.4s, v19.4s, v15.4s \n" /////// "prfm pldl1keep, [x7, #128] \n" "ld1 {v27.4s}, [x7] \n" "st1 {v20.4s}, [%0] \n" "add %0, %0, #32 \n" "fmla v24.4s, v16.4s, v0.4s \n" "fmla v25.4s, v16.4s, v4.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v17.4s}, [%5], #16 \n" "fmla v26.4s, v16.4s, v8.4s \n" "fmla v27.4s, v16.4s, v12.4s \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v20.4s}, [%0] \n" "st1 {v21.4s}, [%1] \n" "add %1, %1, #32 \n" "fmla v24.4s, v17.4s, v1.4s \n" "fmla v25.4s, v17.4s, v5.4s \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v18.4s}, [%6], #16 \n" "fmla v26.4s, v17.4s, v9.4s \n" "fmla v27.4s, v17.4s, v13.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v21.4s}, [%1] \n" "st1 {v22.4s}, [%2] \n" "add %2, %2, #32 \n" "fmla v24.4s, v18.4s, v2.4s \n" "fmla v25.4s, v18.4s, v6.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v19.4s}, [%7], #16 \n" "fmla v26.4s, v18.4s, v10.4s \n" "fmla v27.4s, v18.4s, v14.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v22.4s}, [%2] \n" "st1 {v23.4s}, [%3] \n" "add %3, %3, #32 \n" "fmla v24.4s, v19.4s, v3.4s \n" "fmla v25.4s, v19.4s, v7.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "fmla v26.4s, v19.4s, v11.4s \n" "fmla v27.4s, v19.4s, v15.4s \n" /////// "prfm pldl1keep, [%3, #128] \n" "ld1 {v23.4s}, [%3] \n" "st1 {v24.4s}, [x4] \n" "add x4, x4, #32 \n" "fmla v20.4s, v16.4s, v0.4s \n" "fmla v21.4s, v16.4s, v4.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v17.4s}, [%5], #16 \n" "fmla v22.4s, v16.4s, v8.4s \n" "fmla v23.4s, v16.4s, v12.4s \n" "prfm pldl1keep, [x4, #128] \n" "ld1 {v24.4s}, [x4] \n" "st1 
{v25.4s}, [x5] \n" "add x5, x5, #32 \n" "fmla v20.4s, v17.4s, v1.4s \n" "fmla v21.4s, v17.4s, v5.4s \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v18.4s}, [%6], #16 \n" "fmla v22.4s, v17.4s, v9.4s \n" "fmla v23.4s, v17.4s, v13.4s \n" "prfm pldl1keep, [x5, #128] \n" "ld1 {v25.4s}, [x5] \n" "st1 {v26.4s}, [x6] \n" "add x6, x6, #32 \n" "fmla v20.4s, v18.4s, v2.4s \n" "fmla v21.4s, v18.4s, v6.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v19.4s}, [%7], #16 \n" "fmla v22.4s, v18.4s, v10.4s \n" "fmla v23.4s, v18.4s, v14.4s \n" "prfm pldl1keep, [x6, #128] \n" "ld1 {v26.4s}, [x6] \n" "st1 {v27.4s}, [x7] \n" "add x7, x7, #32 \n" "fmla v20.4s, v19.4s, v3.4s \n" "fmla v21.4s, v19.4s, v7.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "fmla v22.4s, v19.4s, v11.4s \n" "fmla v23.4s, v19.4s, v15.4s \n" /////// "prfm pldl1keep, [x7, #128] \n" "ld1 {v27.4s}, [x7] \n" "st1 {v20.4s}, [%0] \n" "fmla v24.4s, v16.4s, v0.4s \n" "fmla v25.4s, v16.4s, v4.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v17.4s}, [%5], #16 \n" "fmla v26.4s, v16.4s, v8.4s \n" "fmla v27.4s, v16.4s, v12.4s \n" "st1 {v21.4s}, [%1] \n" "fmla v24.4s, v17.4s, v1.4s \n" "fmla v25.4s, v17.4s, v5.4s \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v18.4s}, [%6], #16 \n" "fmla v26.4s, v17.4s, v9.4s \n" "fmla v27.4s, v17.4s, v13.4s \n" "st1 {v22.4s}, [%2] \n" "fmla v24.4s, v18.4s, v2.4s \n" "fmla v25.4s, v18.4s, v6.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v19.4s}, [%7], #16 \n" "fmla v26.4s, v18.4s, v10.4s \n" "fmla v27.4s, v18.4s, v14.4s \n" "st1 {v23.4s}, [%3] \n" "fmla v24.4s, v19.4s, v3.4s \n" "fmla v25.4s, v19.4s, v7.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "fmla v26.4s, v19.4s, v11.4s \n" "fmla v27.4s, v19.4s, v15.4s \n" "st1 {v24.4s}, [x4], #16 \n" "mov %0, x4 \n" "st1 {v25.4s}, [x5], #16 \n" "mov %1, x5 \n" "subs w1, w1, #1 \n" "st1 {v26.4s}, [x6], #16 \n" "mov %2, x6 \n" "st1 {v27.4s}, [x7], #16 \n" "mov %3, x7 \n" "bne 1b \n" "sub %4, %4, #16 \n" //END tile loop "2: \n" // 
remain loop "and w1, %w18, #3 \n" // w1 = remain = tiles & 3 "cmp w1, #0 \n" "beq 4f \n" //BEGIN remain loop "3: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v20.4s}, [%0] \n" "fmla v20.4s, v16.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v21.4s}, [%1] \n" "fmla v21.4s, v16.4s, v4.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v22.4s}, [%2] \n" "fmla v22.4s, v16.4s, v8.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v23.4s}, [%3] \n" "fmla v23.4s, v16.4s, v12.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v17.4s}, [%5], #16 \n" "fmla v20.4s, v17.4s, v1.4s \n" "fmla v21.4s, v17.4s, v5.4s \n" "fmla v22.4s, v17.4s, v9.4s \n" "fmla v23.4s, v17.4s, v13.4s \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v18.4s}, [%6], #16 \n" "fmla v20.4s, v18.4s, v2.4s \n" "fmla v21.4s, v18.4s, v6.4s \n" "fmla v22.4s, v18.4s, v10.4s \n" "fmla v23.4s, v18.4s, v14.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v19.4s}, [%7], #16 \n" "fmla v20.4s, v19.4s, v3.4s \n" "fmla v21.4s, v19.4s, v7.4s \n" "fmla v22.4s, v19.4s, v11.4s \n" "fmla v23.4s, v19.4s, v15.4s \n" "st1 {v20.4s}, [%0], #16 \n" "st1 {v21.4s}, [%1], #16 \n" "subs w1, w1, #1 \n" "st1 {v22.4s}, [%2], #16 \n" "st1 {v23.4s}, [%3], #16 \n" "bne 3b \n" //END remain loop "4: \n" "subs w0, w0, #1 \n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(r1), // %5 "=r"(r2), // %6 "=r"(r3), // %7 "=r"(ktm) // %8 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(r1), "6"(r2), "7"(r3), "8"(ktm), "r"(tiles) // %18 : "cc", "memory", "x0", "x1", "x4", "x5", "x6", "x7", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } #endif // __ARM_NEON && __aarch64__ for (; q + 1 < inch; q += 2) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = 
bottom_blob_tm.channel(q + 1); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; #if __ARM_NEON #if __aarch64__ asm volatile( "mov w0, #16 \n" // w0 = r = 16 "0: \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v0.4s, v1.4s}, [%6], #32 \n" // v0 v1 = _k00 _k01 "prfm pldl1keep, [%6, #256] \n" "ld1 {v2.4s, v3.4s}, [%6], #32 \n" // v2 v3 = _k10 _k11 "prfm pldl1keep, [%6, #256] \n" "ld1 {v4.4s, v5.4s}, [%6], #32 \n" // v4 v5 = _k20 _k21 "prfm pldl1keep, [%6, #256] \n" "ld1 {v6.4s, v7.4s}, [%6], #32 \n" // v6 v7 = _k30 _k31 // tile loop "lsr w1, %w14, #2 \n" // w1 = nn = tiles >> 2 "cmp w1, #0 \n" "beq 2f \n" //BEGIN tile loop "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "1: \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v20.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1] \n" "fmla v17.4s, v20.4s, v2.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v18.4s}, [%2] \n" "fmla v18.4s, v20.4s, v4.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v19.4s}, [%3] \n" "fmla v19.4s, v20.4s, v6.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v21.4s}, [%5], #16 \n" "fmla v16.4s, v21.4s, v1.4s \n" "fmla v17.4s, v21.4s, v3.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "fmla v18.4s, v21.4s, v5.4s \n" "fmla v19.4s, v21.4s, v7.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" //// "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v20.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1] \n" "fmla v17.4s, v20.4s, v2.4s \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v18.4s}, [%2] \n" "fmla v18.4s, v20.4s, v4.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v19.4s}, [%3] \n" "fmla v19.4s, v20.4s, v6.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v21.4s}, [%5], #16 \n" "fmla v16.4s, v21.4s, v1.4s \n" "fmla v17.4s, v21.4s, v3.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 
{v20.4s}, [%4], #16 \n" "fmla v18.4s, v21.4s, v5.4s \n" "fmla v19.4s, v21.4s, v7.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" //// "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v20.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1] \n" "fmla v17.4s, v20.4s, v2.4s \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v18.4s}, [%2] \n" "fmla v18.4s, v20.4s, v4.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v19.4s}, [%3] \n" "fmla v19.4s, v20.4s, v6.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v21.4s}, [%5], #16 \n" "fmla v16.4s, v21.4s, v1.4s \n" "fmla v17.4s, v21.4s, v3.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "fmla v18.4s, v21.4s, v5.4s \n" "fmla v19.4s, v21.4s, v7.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" //// "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v20.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1] \n" "fmla v17.4s, v20.4s, v2.4s \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v18.4s}, [%2] \n" "fmla v18.4s, v20.4s, v4.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v19.4s}, [%3] \n" "fmla v19.4s, v20.4s, v6.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v21.4s}, [%5], #16 \n" "fmla v16.4s, v21.4s, v1.4s \n" "fmla v17.4s, v21.4s, v3.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "fmla v18.4s, v21.4s, v5.4s \n" "fmla v19.4s, v21.4s, v7.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" "subs w1, w1, #1 \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "bne 1b \n" "sub %4, %4, #16 \n" //END tile loop "2: \n" // remain loop "and w1, %w14, #3 \n" // w1 = remain = tiles & 3 "cmp w1, #0 \n" "beq 4f \n" //BEGIN remain loop "3: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v20.4s, v0.4s \n" 
"prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1] \n" "fmla v17.4s, v20.4s, v2.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v18.4s}, [%2] \n" "fmla v18.4s, v20.4s, v4.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v19.4s}, [%3] \n" "fmla v19.4s, v20.4s, v6.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v21.4s}, [%5], #16 \n" "fmla v16.4s, v21.4s, v1.4s \n" "fmla v17.4s, v21.4s, v3.4s \n" "fmla v18.4s, v21.4s, v5.4s \n" "fmla v19.4s, v21.4s, v7.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" "subs w1, w1, #1 \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "bne 3b \n" //END remain loop "4: \n" "subs w0, w0, #1 \n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(r1), // %5 "=r"(ktm) // %6 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(r1), "6"(ktm), "r"(tiles) // %14 : "cc", "memory", "x0", "x1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21"); #else asm volatile( "mov r0, #16 \n" // r0 = r = 16 "0: \n" "pld [%6, #256] \n" "vld1.f32 {d0-d3}, [%6 :128]! \n" // q0 q1 = _k00 _k01 "pld [%6, #256] \n" "vld1.f32 {d4-d7}, [%6 :128]! \n" // q2 q3 = _k10 _k11 "pld [%6, #256] \n" "vld1.f32 {d8-d11}, [%6 :128]! \n" // q4 q5 = _k20 _k21 "pld [%6, #256] \n" "vld1.f32 {d12-d15}, [%6 :128]! \n" // q6 q7 = _k30 _k31 // tile loop "lsr r1, %14, #2 \n" // r1 = nn = tiles >> 2 "cmp r1, #0 \n" "beq 2f \n" //BEGIN tile loop "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! 
\n" // q12 = _r0 "1: \n" "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n" // q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n" // q9 = _output1_tm "vmla.f32 q9, q12, q2 \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n" // q10 = _output2_tm "vmla.f32 q10, q12, q4 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n" // q11 = _output3_tm "vmla.f32 q11, q12, q6 \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5 :128]! \n" // q13 = _r1 "vmla.f32 q8, q13, q1 \n" "vmla.f32 q9, q13, q3 \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n" // q12 = _r0 "vmla.f32 q10, q13, q5 \n" "vmla.f32 q11, q13, q7 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" //// "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n" // q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n" // q9 = _output1_tm "vmla.f32 q9, q12, q2 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n" // q10 = _output2_tm "vmla.f32 q10, q12, q4 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n" // q11 = _output3_tm "vmla.f32 q11, q12, q6 \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5 :128]! \n" // q13 = _r1 "vmla.f32 q8, q13, q1 \n" "vmla.f32 q9, q13, q3 \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n" // q12 = _r0 "vmla.f32 q10, q13, q5 \n" "vmla.f32 q11, q13, q7 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" //// "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n" // q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n" // q9 = _output1_tm "vmla.f32 q9, q12, q2 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! 
\n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n" // q10 = _output2_tm "vmla.f32 q10, q12, q4 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n" // q11 = _output3_tm "vmla.f32 q11, q12, q6 \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5 :128]! \n" // q13 = _r1 "vmla.f32 q8, q13, q1 \n" "vmla.f32 q9, q13, q3 \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n" // q12 = _r0 "vmla.f32 q10, q13, q5 \n" "vmla.f32 q11, q13, q7 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" //// "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n" // q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n" // q9 = _output1_tm "vmla.f32 q9, q12, q2 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n" // q10 = _output2_tm "vmla.f32 q10, q12, q4 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n" // q11 = _output3_tm "vmla.f32 q11, q12, q6 \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5 :128]! \n" // q13 = _r1 "vmla.f32 q8, q13, q1 \n" "vmla.f32 q9, q13, q3 \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n" // q12 = _r0 "vmla.f32 q10, q13, q5 \n" "vmla.f32 q11, q13, q7 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" "subs r1, #1 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! \n" "bne 1b \n" "sub %4, %4, #16 \n" //END tile loop "2: \n" // remain loop "and r1, %14, #3 \n" // r1 = remain = tiles & 3 "cmp r1, #0 \n" "beq 4f \n" //BEGIN remain loop "3: \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! 
\n" // q12 = _r0 "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n" // q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n" // q9 = _output1_tm "vmla.f32 q9, q12, q2 \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n" // q10 = _output2_tm "vmla.f32 q10, q12, q4 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n" // q11 = _output3_tm "vmla.f32 q11, q12, q6 \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5 :128]! \n" // q13 = _r1 "vmla.f32 q8, q13, q1 \n" "vmla.f32 q9, q13, q3 \n" "vmla.f32 q10, q13, q5 \n" "vmla.f32 q11, q13, q7 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" "subs r1, #1 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! \n" "bne 3b \n" //END remain loop "4: \n" "subs r0, #1 \n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(r1), // %5 "=r"(ktm) // %6 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(r1), "6"(ktm), "r"(tiles) // %14 : "cc", "memory", "r0", "r1", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13"); #endif // __aarch64__ #else for (int r = 0; r < 16; r++) { for (int t = 0; t < tiles; t++) { for (int m = 0; m < 4; m++) { output0_tm[m] += r0[m] * ktm[0 + m]; output0_tm[m] += r1[m] * ktm[4 + m]; output1_tm[m] += r0[m] * ktm[8 + m]; output1_tm[m] += r1[m] * ktm[12 + m]; output2_tm[m] += r0[m] * ktm[16 + m]; output2_tm[m] += r1[m] * ktm[20 + m]; output3_tm[m] += r0[m] * ktm[24 + m]; output3_tm[m] += r1[m] * ktm[28 + m]; } r0 += 4; r1 += 4; output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; } ktm += 32; } #endif // __ARM_NEON } for (; q < inch; q++) { const float* r0 = bottom_blob_tm.channel(q); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; #if __ARM_NEON #if __aarch64__ asm volatile( "mov w0, 
#16 \n" // w0 = r = 16 "0: \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4s, v1.4s}, [%5], #32 \n" // v0 v1 = _k00 _k10 "prfm pldl1keep, [%5, #256] \n" "ld1 {v2.4s, v3.4s}, [%5], #32 \n" // v2 v3 = _k20 _k30 // tile loop "mov w1, %w12 \n" // w1 = tiles "cmp w1, #0 \n" "beq 2f \n" //BEGIN tile loop "1: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v17.4s}, [%0] \n" "fmla v17.4s, v16.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v18.4s}, [%1] \n" "fmla v18.4s, v16.4s, v1.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v19.4s}, [%2] \n" "fmla v19.4s, v16.4s, v2.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v20.4s}, [%3] \n" "fmla v20.4s, v16.4s, v3.4s \n" "st1 {v17.4s}, [%0], #16 \n" "st1 {v18.4s}, [%1], #16 \n" "subs w1, w1, #1 \n" "st1 {v19.4s}, [%2], #16 \n" "st1 {v20.4s}, [%3], #16 \n" "bne 1b \n" //END tile loop "2: \n" "subs w0, w0, #1 \n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(ktm) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(ktm), "r"(tiles) // %12 : "cc", "memory", "x0", "x1", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20"); #else asm volatile( "mov r0, #16 \n" // r0 = r = 16 "0: \n" "pld [%5, #256] \n" "vld1.f32 {d0-d3}, [%5 :128]! \n" // q0 q1 = _k00 _k10 "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" // q2 q3 = _k20 _k30 // tile loop "mov r1, %12 \n" // r1 = tiles "cmp r1, #0 \n" "beq 2f \n" //BEGIN tile loop "1: \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! 
\n" // q12 = _r0 "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n" // q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n" // q9 = _output1_tm "vmla.f32 q9, q12, q1 \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n" // q10 = _output2_tm "vmla.f32 q10, q12, q2 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n" // q11 = _output3_tm "vmla.f32 q11, q12, q3 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" "subs r1, #1 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! \n" "bne 1b \n" //END tile loop "2: \n" "subs r0, #1 \n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(ktm) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(ktm), "r"(tiles) // %12 : "cc", "memory", "r0", "r1", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13"); #endif // __aarch64__ #else for (int r = 0; r < 16; r++) { for (int t = 0; t < tiles; t++) { for (int m = 0; m < 4; m++) { output0_tm[m] += r0[m] * ktm[0 + m]; output1_tm[m] += r0[m] * ktm[4 + m]; output2_tm[m] += r0[m] * ktm[8 + m]; output3_tm[m] += r0[m] * ktm[12 + m]; } r0 += 4; output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; } ktm += 16; } #endif // __ARM_NEON } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out0_tm = top_blob_tm.channel(p); const float* ktm = (const float*)kernel_tm.channel(nn_outch) + 8 * 8 * inch * (p - remain_outch_start); out0_tm.fill(0.f); int q = 0; for (; q < inch; q++) { const float* r0 = bottom_blob_tm.channel(q); float* output0_tm = out0_tm; for (int r = 0; r < 16; r++) { #if __ARM_NEON float32x4_t _k00 = vld1q_f32(ktm); ktm += 4; #endif // __ARM_NEON // tile for (int i = 0; i < tiles; i++) { #if __ARM_NEON #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #128] 
\n" "ld1 {v17.4s}, [%1], #16 \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v17.4s, %4.4s \n" "st1 {v16.4s}, [%0], #16 \n" : "=r"(output0_tm), // %0 "=r"(r0) // %1 : "0"(output0_tm), "1"(r0), "w"(_k00) // %4 : "cc", "memory", "v16", "v17"); #else asm volatile( "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128]! \n" // q9 = _r0 "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n" // q8 = _output0_tm "vmla.f32 q8, q9, %q4 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" : "=r"(output0_tm), // %0 "=r"(r0) // %1 : "0"(output0_tm), "1"(r0), "w"(_k00) // %4 : "cc", "memory", "q8", "q9"); #endif // __aarch64__ #else for (int m = 0; m < 4; m++) { output0_tm[m] += r0[m] * ktm[m]; } r0 += 4; output0_tm += 4; #endif // __ARM_NEON } #if !__ARM_NEON ktm += 4; #endif // __ARM_NEON } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator); { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) #if __ARM_NEON const float coeff[4] = {4.f, 8.f, 16.f, 32.f}; float32x4_t _coeff = vld1q_f32(coeff); #endif // __ARM_NEON int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = 
top_blob_bordered.channel(p); const float bias0 = bias ? bias[p] : 0.f; #if __ARM_NEON float32x2_t _bias0 = vdup_n_f32(bias0); #endif // __ARM_NEON float tmp[6][8]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { #if __ARM_NEON const float* output0_tm0_0 = out0_tm.row(i * w_tm / 8 + j); const float* output0_tm0_4 = out0_tm.row(i * w_tm / 8 + j + tiles); const float* output0_tm1_0 = out0_tm.row(i * w_tm / 8 + j + tiles * 2); const float* output0_tm1_4 = out0_tm.row(i * w_tm / 8 + j + tiles * 3); const float* output0_tm2_0 = out0_tm.row(i * w_tm / 8 + j + tiles * 4); const float* output0_tm2_4 = out0_tm.row(i * w_tm / 8 + j + tiles * 5); const float* output0_tm3_0 = out0_tm.row(i * w_tm / 8 + j + tiles * 6); const float* output0_tm3_4 = out0_tm.row(i * w_tm / 8 + j + tiles * 7); #if __aarch64__ for (int m = 0; m + 3 < 8; m += 4) { float32x4_t _output0_tm0_0123 = vld1q_f32(output0_tm0_0); float32x4_t _output0_tm0_4567 = vld1q_f32(output0_tm0_4); float32x4_t _output0_tm1_0123 = vld1q_f32(output0_tm1_0); float32x4_t _output0_tm1_4567 = vld1q_f32(output0_tm1_4); float32x4_t _output0_tm2_0123 = vld1q_f32(output0_tm2_0); float32x4_t _output0_tm2_4567 = vld1q_f32(output0_tm2_4); float32x4_t _output0_tm3_0123 = vld1q_f32(output0_tm3_0); float32x4_t _output0_tm3_4567 = vld1q_f32(output0_tm3_4); float32x4x2_t _output0_tm01_00221133 = vtrnq_f32(_output0_tm0_0123, _output0_tm1_0123); float32x4x2_t _output0_tm01_44665577 = vtrnq_f32(_output0_tm0_4567, _output0_tm1_4567); float32x4x2_t _output0_tm23_00221133 = vtrnq_f32(_output0_tm2_0123, _output0_tm3_0123); float32x4x2_t _output0_tm23_44665577 = vtrnq_f32(_output0_tm2_4567, _output0_tm3_4567); // no vswp intrinsic :( float32x4_t _output0_tm_00 = vcombine_f32(vget_low_f32(_output0_tm01_00221133.val[0]), vget_low_f32(_output0_tm23_00221133.val[0])); float32x4_t _output0_tm_11 = vcombine_f32(vget_low_f32(_output0_tm01_00221133.val[1]), vget_low_f32(_output0_tm23_00221133.val[1])); float32x4_t 
_output0_tm_22 = vcombine_f32(vget_high_f32(_output0_tm01_00221133.val[0]), vget_high_f32(_output0_tm23_00221133.val[0])); float32x4_t _output0_tm_33 = vcombine_f32(vget_high_f32(_output0_tm01_00221133.val[1]), vget_high_f32(_output0_tm23_00221133.val[1])); float32x4_t _output0_tm_44 = vcombine_f32(vget_low_f32(_output0_tm01_44665577.val[0]), vget_low_f32(_output0_tm23_44665577.val[0])); float32x4_t _output0_tm_55 = vcombine_f32(vget_low_f32(_output0_tm01_44665577.val[1]), vget_low_f32(_output0_tm23_44665577.val[1])); float32x4_t _output0_tm_66 = vcombine_f32(vget_high_f32(_output0_tm01_44665577.val[0]), vget_high_f32(_output0_tm23_44665577.val[0])); float32x4_t _output0_tm_77 = vcombine_f32(vget_high_f32(_output0_tm01_44665577.val[1]), vget_high_f32(_output0_tm23_44665577.val[1])); float32x4_t _tmp024a = vaddq_f32(_output0_tm_11, _output0_tm_22); float32x4_t _tmp135a = vsubq_f32(_output0_tm_11, _output0_tm_22); float32x4_t _tmp024b = vaddq_f32(_output0_tm_33, _output0_tm_44); float32x4_t _tmp135b = vsubq_f32(_output0_tm_33, _output0_tm_44); float32x4_t _tmp024c = vaddq_f32(_output0_tm_55, _output0_tm_66); float32x4_t _tmp135c = vsubq_f32(_output0_tm_55, _output0_tm_66); float32x4_t _tmp0 = vaddq_f32(_output0_tm_00, _tmp024a); _tmp0 = vmlaq_lane_f32(_tmp0, _tmp024c, vget_high_f32(_coeff), 1); _tmp0 = vaddq_f32(_tmp0, _tmp024b); float32x4_t _tmp2 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0); _tmp2 = vmlaq_lane_f32(_tmp2, _tmp024c, vget_low_f32(_coeff), 1); float32x4_t _tmp4 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0); _tmp4 = vaddq_f32(_tmp4, _tmp024c); _tmp4 = vaddq_f32(_tmp4, _tmp024c); vst1q_f32(&tmp[0][m], _tmp0); vst1q_f32(&tmp[2][m], _tmp2); vst1q_f32(&tmp[4][m], _tmp4); float32x4_t _tmp1 = vmlaq_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0); _tmp1 = vaddq_f32(_tmp1, _tmp135b); _tmp1 = vaddq_f32(_tmp1, _tmp135b); float32x4_t _tmp3 = vmlaq_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1); _tmp3 = 
vmlaq_lane_f32(_tmp3, _tmp135c, vget_low_f32(_coeff), 0); float32x4_t _tmp5 = vaddq_f32(_output0_tm_77, _tmp135a); _tmp5 = vmlaq_lane_f32(_tmp5, _tmp135b, vget_high_f32(_coeff), 1); _tmp5 = vaddq_f32(_tmp5, _tmp135c); vst1q_f32(&tmp[1][m], _tmp1); vst1q_f32(&tmp[3][m], _tmp3); vst1q_f32(&tmp[5][m], _tmp5); output0_tm0_0 += out0_tm.w * tiles * 2 * 4; output0_tm0_4 += out0_tm.w * tiles * 2 * 4; output0_tm1_0 += out0_tm.w * tiles * 2 * 4; output0_tm1_4 += out0_tm.w * tiles * 2 * 4; output0_tm2_0 += out0_tm.w * tiles * 2 * 4; output0_tm2_4 += out0_tm.w * tiles * 2 * 4; output0_tm3_0 += out0_tm.w * tiles * 2 * 4; output0_tm3_4 += out0_tm.w * tiles * 2 * 4; } const float* t0 = tmp[0]; const float* t1 = tmp[1]; float* output0 = out0.row(i * 6) + j * 6; float* output1 = output0 + outw; for (int m = 0; m + 1 < 6; m += 2) { float32x4_t _t0_0123 = vld1q_f32(t0); float32x4_t _t0_4567 = vld1q_f32(t0 + 4); float32x4_t _t1_0123 = vld1q_f32(t1); float32x4_t _t1_4567 = vld1q_f32(t1 + 4); float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123); float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567); float32x2_t _t_00 = vget_low_f32(_t01_00221133.val[0]); float32x2_t _t_11 = vget_low_f32(_t01_00221133.val[1]); float32x2_t _t_22 = vget_high_f32(_t01_00221133.val[0]); float32x2_t _t_33 = vget_high_f32(_t01_00221133.val[1]); float32x2_t _t_44 = vget_low_f32(_t01_44665577.val[0]); float32x2_t _t_55 = vget_low_f32(_t01_44665577.val[1]); float32x2_t _t_66 = vget_high_f32(_t01_44665577.val[0]); float32x2_t _t_77 = vget_high_f32(_t01_44665577.val[1]); float32x2_t _tmp024a = vadd_f32(_t_11, _t_22); float32x2_t _tmp135a = vsub_f32(_t_11, _t_22); float32x2_t _tmp024b = vadd_f32(_t_33, _t_44); float32x2_t _tmp135b = vsub_f32(_t_33, _t_44); float32x2_t _tmp024c = vadd_f32(_t_55, _t_66); float32x2_t _tmp135c = vsub_f32(_t_55, _t_66); float32x2_t _output_0 = vadd_f32(_t_00, _tmp024a); _output_0 = vmla_lane_f32(_output_0, _tmp024c, vget_high_f32(_coeff), 1); _output_0 = vadd_f32(_output_0, 
_tmp024b); _output_0 = vadd_f32(_output_0, _bias0); float32x2_t _output_2 = vmla_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0); _output_2 = vmla_lane_f32(_output_2, _tmp024c, vget_low_f32(_coeff), 1); _output_2 = vadd_f32(_output_2, _bias0); float32x2_t _output_4 = vmla_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0); _output_4 = vadd_f32(_output_4, _tmp024c); _output_4 = vadd_f32(_output_4, _tmp024c); _output_4 = vadd_f32(_output_4, _bias0); output0[0] = vget_lane_f32(_output_0, 0); output1[0] = vget_lane_f32(_output_0, 1); output0[2] = vget_lane_f32(_output_2, 0); output1[2] = vget_lane_f32(_output_2, 1); output0[4] = vget_lane_f32(_output_4, 0); output1[4] = vget_lane_f32(_output_4, 1); float32x2_t _output_1 = vmla_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0); _output_1 = vadd_f32(_output_1, _tmp135b); _output_1 = vadd_f32(_output_1, _tmp135b); _output_1 = vadd_f32(_output_1, _bias0); float32x2_t _output_3 = vmla_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1); _output_3 = vmla_lane_f32(_output_3, _tmp135c, vget_low_f32(_coeff), 0); _output_3 = vadd_f32(_output_3, _bias0); float32x2_t _output_5 = vadd_f32(_t_77, _tmp135a); _output_5 = vmla_lane_f32(_output_5, _tmp135b, vget_high_f32(_coeff), 1); _output_5 = vadd_f32(_output_5, _tmp135c); _output_5 = vadd_f32(_output_5, _bias0); output0[1] = vget_lane_f32(_output_1, 0); output1[1] = vget_lane_f32(_output_1, 1); output0[3] = vget_lane_f32(_output_3, 0); output1[3] = vget_lane_f32(_output_3, 1); output0[5] = vget_lane_f32(_output_5, 0); output1[5] = vget_lane_f32(_output_5, 1); t0 += 8 * 2; t1 += 8 * 2; output0 += outw * 2; output1 += outw * 2; } #else // __aarch64__ float* t0 = tmp[0]; float* t1 = tmp[1]; int step = out0_tm.w * tiles * 2 * 4 * 4; asm volatile( // loop0 "vld1.f32 {d16-d17}, [%2], %21 \n" "vld1.f32 {d18-d19}, [%3], %21 \n" "vld1.f32 {d20-d21}, [%4], %21 \n" "vld1.f32 {d22-d23}, [%5], %21 \n" "vld1.f32 {d24-d25}, [%6], %21 \n" "vld1.f32 {d26-d27}, [%7], %21 \n" 
"vld1.f32 {d28-d29}, [%8], %21 \n" "vld1.f32 {d30-d31}, [%9], %21 \n" "vtrn.32 q8, q10 \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vadd.f32 q2, q10, q12 \n" "vsub.f32 q3, q10, q12 \n" "vadd.f32 q4, q14, q9 \n" "vsub.f32 q5, q14, q9 \n" "vadd.f32 q6, q11, q13 \n" "vsub.f32 q7, q11, q13 \n" // spare q9 q10 q11 q12 q13 q14 "vmov q9, q3 \n" "vadd.f32 q8, q8, q2 \n" "vmla.f32 q9, q7, %f20[0] \n" "vmov q12, q2 \n" "vmov q10, q2 \n" "vmov q11, q3 \n" "vmla.f32 q12, q4, %f20[0] \n" "vadd.f32 q15, q15, q3 \n" "vmla.f32 q8, q6, %f20[1] \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q4, %e20[0] \n" "vmla.f32 q11, q5, %e20[1] \n" "vadd.f32 q12, q12, q6 \n" "vmla.f32 q15, q5, %f20[1] \n" "vadd.f32 q8, q8, q4 \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q6, %e20[1] \n" "vmla.f32 q11, q7, %e20[0] \n" "vadd.f32 q12, q12, q6 \n" "vadd.f32 q15, q15, q7 \n" "vst1.f32 {d16-d17}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d18-d19}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d20-d21}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d22-d23}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d24-d25}, [%0] \n" "sub %0, %0, #112 \n" "vst1.f32 {d30-d31}, [%1] \n" "sub %1, %1, #112 \n" // loop1 "vld1.f32 {d16-d17}, [%2] \n" "vld1.f32 {d18-d19}, [%3] \n" "vld1.f32 {d20-d21}, [%4] \n" "vld1.f32 {d22-d23}, [%5] \n" "vld1.f32 {d24-d25}, [%6] \n" "vld1.f32 {d26-d27}, [%7] \n" "vld1.f32 {d28-d29}, [%8] \n" "vld1.f32 {d30-d31}, [%9] \n" "vtrn.32 q8, q10 \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vadd.f32 q2, q10, q12 \n" "vsub.f32 q3, q10, q12 \n" "vadd.f32 q4, q14, q9 \n" "vsub.f32 q5, q14, q9 \n" "vadd.f32 q6, q11, q13 \n" "vsub.f32 q7, q11, q13 \n" // spare q9 q10 q11 
q12 q13 q14 "vmov q9, q3 \n" "vadd.f32 q8, q8, q2 \n" "vmla.f32 q9, q7, %f20[0] \n" "vmov q12, q2 \n" "vmov q10, q2 \n" "vmov q11, q3 \n" "vmla.f32 q12, q4, %f20[0] \n" "vadd.f32 q15, q15, q3 \n" "vmla.f32 q8, q6, %f20[1] \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q4, %e20[0] \n" "vmla.f32 q11, q5, %e20[1] \n" "vadd.f32 q12, q12, q6 \n" "vmla.f32 q15, q5, %f20[1] \n" "vadd.f32 q8, q8, q4 \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q6, %e20[1] \n" "vmla.f32 q11, q7, %e20[0] \n" "vadd.f32 q12, q12, q6 \n" "vadd.f32 q15, q15, q7 \n" "vst1.f32 {d16-d17}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d18-d19}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d20-d21}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d22-d23}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d24-d25}, [%0] \n" "vst1.f32 {d30-d31}, [%1] \n" : "=r"(t0), // %0 "=r"(t1), // %1 "=r"(output0_tm0_0), // %2 "=r"(output0_tm0_4), // %3 "=r"(output0_tm1_0), // %4 "=r"(output0_tm1_4), // %5 "=r"(output0_tm2_0), // %6 "=r"(output0_tm2_4), // %7 "=r"(output0_tm3_0), // %8 "=r"(output0_tm3_4) // %9 : "0"(t0), "1"(t1), "2"(output0_tm0_0), "3"(output0_tm0_4), "4"(output0_tm1_0), "5"(output0_tm1_4), "6"(output0_tm2_0), "7"(output0_tm2_4), "8"(output0_tm3_0), "9"(output0_tm3_4), "w"(_coeff), // %20 "r"(step) // %21 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); t0 = tmp[0]; t1 = tmp[1]; float* output0 = out0.row(i * 6) + j * 6; float* output1 = output0 + outw; int stepw = outw * 2 * 4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%2] \n" "vld1.f32 {d20-d23}, [%3] \n" "add %2, %2, #64 \n" "add %3, %3, #64 \n" "vtrn.32 q8, q10 \n" // q8 = 0 2 q10 = 1 3 "vtrn.32 q9, q11 \n" // q9 = 4 6 q11 = 5 7 "vadd.f32 d4, d20, d17 \n" "vsub.f32 d5, d20, d17 \n" "vadd.f32 d6, d21, d18 \n" "vsub.f32 d7, d21, d18 \n" "vadd.f32 d8, d22, d19 \n" "vsub.f32 d9, d22, d19 \n" // spare d17 ~ d22 "vmov d20, d5 \n" "vmov d18, d4 \n" "vadd.f32 d16, d16, d4 \n" "vmla.f32 d20, d9, %f8[0] \n" "vmov d17, d4 \n" 
"vmov d21, d5 \n" "vmla.f32 d18, d6, %f8[0] \n" "vadd.f32 d22, d23, d5 \n" "vmla.f32 d16, d8, %f8[1] \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d6, %e8[0] \n" "vmla.f32 d21, d7, %e8[1] \n" "vadd.f32 d18, d18, d8 \n" "vmla.f32 d22, d7, %f8[1] \n" "vadd.f32 d16, d16, d6 \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d8, %e8[1] \n" "vmla.f32 d21, d9, %e8[0] \n" "vadd.f32 d18, d18, d8 \n" "vadd.f32 d22, d22, d9 \n" "vadd.f32 d16, d16, %P9 \n" // _bias0 "vadd.f32 d20, d20, %P9 \n" // _bias0 "vadd.f32 d17, d17, %P9 \n" // _bias0 "vadd.f32 d21, d21, %P9 \n" // _bias0 "vadd.f32 d18, d18, %P9 \n" // _bias0 "vadd.f32 d22, d22, %P9 \n" // _bias0 "vtrn.f32 q8, q10 \n" "vtrn.f32 d18, d22 \n" "vst1.f32 {d16-d18}, [%0], %10 \n" "vst1.f32 {d20-d22}, [%1], %10 \n" // loop1 "vld1.f32 {d16-d19}, [%2] \n" "vld1.f32 {d20-d23}, [%3] \n" "add %2, %2, #64 \n" "add %3, %3, #64 \n" "vtrn.32 q8, q10 \n" // q8 = 0 2 q10 = 1 3 "vtrn.32 q9, q11 \n" // q9 = 4 6 q11 = 5 7 "vadd.f32 d4, d20, d17 \n" "vsub.f32 d5, d20, d17 \n" "vadd.f32 d6, d21, d18 \n" "vsub.f32 d7, d21, d18 \n" "vadd.f32 d8, d22, d19 \n" "vsub.f32 d9, d22, d19 \n" // spare d17 ~ d22 "vmov d20, d5 \n" "vmov d18, d4 \n" "vadd.f32 d16, d16, d4 \n" "vmla.f32 d20, d9, %f8[0] \n" "vmov d17, d4 \n" "vmov d21, d5 \n" "vmla.f32 d18, d6, %f8[0] \n" "vadd.f32 d22, d23, d5 \n" "vmla.f32 d16, d8, %f8[1] \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d6, %e8[0] \n" "vmla.f32 d21, d7, %e8[1] \n" "vadd.f32 d18, d18, d8 \n" "vmla.f32 d22, d7, %f8[1] \n" "vadd.f32 d16, d16, d6 \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d8, %e8[1] \n" "vmla.f32 d21, d9, %e8[0] \n" "vadd.f32 d18, d18, d8 \n" "vadd.f32 d22, d22, d9 \n" "vadd.f32 d16, d16, %P9 \n" // _bias0 "vadd.f32 d20, d20, %P9 \n" // _bias0 "vadd.f32 d17, d17, %P9 \n" // _bias0 "vadd.f32 d21, d21, %P9 \n" // _bias0 "vadd.f32 d18, d18, %P9 \n" // _bias0 "vadd.f32 d22, d22, %P9 \n" // _bias0 "vtrn.f32 q8, q10 \n" "vtrn.f32 d18, d22 \n" "vst1.f32 {d16-d18}, [%0], %10 \n" "vst1.f32 {d20-d22}, 
[%1], %10 \n" // loop2 "vld1.f32 {d16-d19}, [%2] \n" "vld1.f32 {d20-d23}, [%3] \n" "add %2, %2, #64 \n" "add %3, %3, #64 \n" "vtrn.32 q8, q10 \n" // q8 = 0 2 q10 = 1 3 "vtrn.32 q9, q11 \n" // q9 = 4 6 q11 = 5 7 "vadd.f32 d4, d20, d17 \n" "vsub.f32 d5, d20, d17 \n" "vadd.f32 d6, d21, d18 \n" "vsub.f32 d7, d21, d18 \n" "vadd.f32 d8, d22, d19 \n" "vsub.f32 d9, d22, d19 \n" // spare d17 ~ d22 "vmov d20, d5 \n" "vmov d18, d4 \n" "vadd.f32 d16, d16, d4 \n" "vmla.f32 d20, d9, %f8[0] \n" "vmov d17, d4 \n" "vmov d21, d5 \n" "vmla.f32 d18, d6, %f8[0] \n" "vadd.f32 d22, d23, d5 \n" "vmla.f32 d16, d8, %f8[1] \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d6, %e8[0] \n" "vmla.f32 d21, d7, %e8[1] \n" "vadd.f32 d18, d18, d8 \n" "vmla.f32 d22, d7, %f8[1] \n" "vadd.f32 d16, d16, d6 \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d8, %e8[1] \n" "vmla.f32 d21, d9, %e8[0] \n" "vadd.f32 d18, d18, d8 \n" "vadd.f32 d22, d22, d9 \n" "vadd.f32 d16, d16, %P9 \n" // _bias0 "vadd.f32 d20, d20, %P9 \n" // _bias0 "vadd.f32 d17, d17, %P9 \n" // _bias0 "vadd.f32 d21, d21, %P9 \n" // _bias0 "vadd.f32 d18, d18, %P9 \n" // _bias0 "vadd.f32 d22, d22, %P9 \n" // _bias0 "vtrn.f32 q8, q10 \n" "vtrn.f32 d18, d22 \n" "vst1.f32 {d16-d18}, [%0], %10 \n" "vst1.f32 {d20-d22}, [%1], %10 \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(t0), // %2 "=r"(t1) // %3 : "0"(output0), "1"(output1), "2"(t0), "3"(t1), "w"(_coeff), // %8 "w"(_bias0), // %9 "r"(stepw) // %10 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ #else const float* output0_tm_0 = out0_tm.row(i * w_tm / 8 + j); const float* output0_tm_4 = out0_tm.row(i * w_tm / 8 + j + tiles); for (int m = 0; m < 8; m++) { float tmp024a = output0_tm_0[1] + output0_tm_0[2]; float tmp135a = output0_tm_0[1] - output0_tm_0[2]; float tmp024b = output0_tm_0[3] + output0_tm_4[0]; float tmp135b = output0_tm_0[3] - output0_tm_4[0]; float tmp024c = output0_tm_4[1] + output0_tm_4[2]; float tmp135c = 
output0_tm_4[1] - output0_tm_4[2]; tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32; tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; tmp[5][m] = output0_tm_4[3] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += out0_tm.w * tiles * 2; output0_tm_4 += out0_tm.w * tiles * 2; } float* output0 = out0.row(i * 6) + j * 6; for (int m = 0; m < 6; m++) { const float* tmp0 = tmp[m]; float tmp024a = tmp0[1] + tmp0[2]; float tmp135a = tmp0[1] - tmp0[2]; float tmp024b = tmp0[3] + tmp0[4]; float tmp135b = tmp0[3] - tmp0[4]; float tmp024c = tmp0[5] + tmp0[6]; float tmp135c = tmp0[5] - tmp0[6]; output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw; } #endif // __ARM_NEON } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s1_winograd63_neon5(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; Option opt_b = opt; opt_b.blob_allocator = opt.workspace_allocator; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b); const float* bias = _bias; // BEGIN transform 
input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; bottom_blob_tm.create(1, 64 * tiles, inch, 4u, opt.workspace_allocator); // bottom_blob_tm.create(inch, tiles, 64); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #if __ARM_NEON const float coeff[8] = { 0.25f, 0.5f, -1.25f, 2.f, -2.5f, 4.f, 4.25f, 5.25f }; float32x4_t _coeff0 = vld1q_f32(coeff); float32x4_t _coeff1 = vld1q_f32(coeff + 4); #endif // __ARM_NEON #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8]; // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { #if __ARM_NEON const float* r0 = img0.row(i * 6) + j * 6; const float* r1 = r0 + w; const float* r2 = r0 + w * 2; const float* r3 = r0 + w * 3; #if __aarch64__ for (int m = 0; m + 3 < 8; m += 4) { float32x4_t _r0_0123 = vld1q_f32(r0); 
float32x4_t _r0_4567 = vld1q_f32(r0 + 4); float32x4_t _r1_0123 = vld1q_f32(r1); float32x4_t _r1_4567 = vld1q_f32(r1 + 4); float32x4_t _r2_0123 = vld1q_f32(r2); float32x4_t _r2_4567 = vld1q_f32(r2 + 4); float32x4_t _r3_0123 = vld1q_f32(r3); float32x4_t _r3_4567 = vld1q_f32(r3 + 4); float32x4x2_t _r01_00221133 = vtrnq_f32(_r0_0123, _r1_0123); float32x4x2_t _r01_44665577 = vtrnq_f32(_r0_4567, _r1_4567); float32x4x2_t _r23_00221133 = vtrnq_f32(_r2_0123, _r3_0123); float32x4x2_t _r23_44665577 = vtrnq_f32(_r2_4567, _r3_4567); // no vswp intrinsic :( float32x4_t _r_00 = vcombine_f32(vget_low_f32(_r01_00221133.val[0]), vget_low_f32(_r23_00221133.val[0])); float32x4_t _r_11 = vcombine_f32(vget_low_f32(_r01_00221133.val[1]), vget_low_f32(_r23_00221133.val[1])); float32x4_t _r_22 = vcombine_f32(vget_high_f32(_r01_00221133.val[0]), vget_high_f32(_r23_00221133.val[0])); float32x4_t _r_33 = vcombine_f32(vget_high_f32(_r01_00221133.val[1]), vget_high_f32(_r23_00221133.val[1])); float32x4_t _r_44 = vcombine_f32(vget_low_f32(_r01_44665577.val[0]), vget_low_f32(_r23_44665577.val[0])); float32x4_t _r_55 = vcombine_f32(vget_low_f32(_r01_44665577.val[1]), vget_low_f32(_r23_44665577.val[1])); float32x4_t _r_66 = vcombine_f32(vget_high_f32(_r01_44665577.val[0]), vget_high_f32(_r23_44665577.val[0])); float32x4_t _r_77 = vcombine_f32(vget_high_f32(_r01_44665577.val[1]), vget_high_f32(_r23_44665577.val[1])); float32x4_t _r_0_m_6 = vsubq_f32(_r_00, _r_66); float32x4_t _r_7_m_1 = vsubq_f32(_r_77, _r_11); float32x4_t _r_4_m_2 = vsubq_f32(_r_44, _r_22); float32x4_t _r_3_m_5 = vsubq_f32(_r_33, _r_55); float32x4_t _tmp0 = vmlaq_lane_f32(_r_0_m_6, _r_4_m_2, vget_high_f32(_coeff1), 1); float32x4_t _tmp7 = vmlaq_lane_f32(_r_7_m_1, _r_3_m_5, vget_high_f32(_coeff1), 1); vst1q_f32(&tmp[0][m], _tmp0); vst1q_f32(&tmp[7][m], _tmp7); float32x4_t _r_2_a_6 = vaddq_f32(_r_22, _r_66); float32x4_t _r_1_a_5 = vaddq_f32(_r_11, _r_55); float32x4_t _tmp12a = vmlsq_lane_f32(_r_2_a_6, _r_44, vget_high_f32(_coeff1), 
0); float32x4_t _tmp12b = vmlsq_lane_f32(_r_1_a_5, _r_33, vget_high_f32(_coeff1), 0); float32x4_t _tmp1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _tmp2 = vsubq_f32(_tmp12a, _tmp12b); vst1q_f32(&tmp[1][m], _tmp1); vst1q_f32(&tmp[2][m], _tmp2); float32x4_t _r_4_x_c = vmulq_lane_f32(_r_44, vget_high_f32(_coeff0), 0); float32x4_t _r_3_x_c = vmulq_lane_f32(_r_33, vget_low_f32(_coeff1), 0); float32x4_t _tmp34a = vaddq_f32(_r_66, _r_4_x_c); _tmp34a = vmlaq_lane_f32(_tmp34a, _r_22, vget_low_f32(_coeff0), 0); float32x4_t _tmp34b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_low_f32(_coeff0), 1); _tmp34b = vmlaq_lane_f32(_tmp34b, _r_55, vget_high_f32(_coeff0), 1); float32x4_t _tmp3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _tmp4 = vsubq_f32(_tmp34a, _tmp34b); vst1q_f32(&tmp[3][m], _tmp3); vst1q_f32(&tmp[4][m], _tmp4); // reuse r04 * 1.25 // reuse r03 * 2.5 float32x4_t _r_2_a_4c = vaddq_f32(_r_22, _r_4_x_c); float32x4_t _tmp56a = vmlaq_lane_f32(_r_66, _r_2_a_4c, vget_low_f32(_coeff1), 1); float32x4_t _tmp56b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_high_f32(_coeff0), 1); _tmp56b = vmlaq_lane_f32(_tmp56b, _r_55, vget_low_f32(_coeff0), 1); float32x4_t _tmp5 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _tmp6 = vsubq_f32(_tmp56a, _tmp56b); vst1q_f32(&tmp[5][m], _tmp5); vst1q_f32(&tmp[6][m], _tmp6); r0 += w * 4; r1 += w * 4; r2 += w * 4; r3 += w * 4; } const float* t0 = tmp[0]; const float* t1 = tmp[1]; const float* t2 = tmp[2]; const float* t3 = tmp[3]; float* r0_tm0 = img0_tm.row(i * w_tm / 8 + j); float* r0_tm1 = img0_tm.row(i * w_tm / 8 + j + tiles * 8); float* r0_tm2 = img0_tm.row(i * w_tm / 8 + j + tiles * 16); float* r0_tm3 = img0_tm.row(i * w_tm / 8 + j + tiles * 24); for (int m = 0; m + 3 < 8; m += 4) { float32x4_t _t0_0123 = vld1q_f32(t0); float32x4_t _t0_4567 = vld1q_f32(t0 + 4); float32x4_t _t1_0123 = vld1q_f32(t1); float32x4_t _t1_4567 = vld1q_f32(t1 + 4); float32x4_t _t2_0123 = vld1q_f32(t2); float32x4_t _t2_4567 = vld1q_f32(t2 + 4); float32x4_t _t3_0123 = vld1q_f32(t3); 
float32x4_t _t3_4567 = vld1q_f32(t3 + 4); float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123); float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567); float32x4x2_t _t23_00221133 = vtrnq_f32(_t2_0123, _t3_0123); float32x4x2_t _t23_44665577 = vtrnq_f32(_t2_4567, _t3_4567); // no vswp intrinsic :( float32x4_t _t_00 = vcombine_f32(vget_low_f32(_t01_00221133.val[0]), vget_low_f32(_t23_00221133.val[0])); float32x4_t _t_11 = vcombine_f32(vget_low_f32(_t01_00221133.val[1]), vget_low_f32(_t23_00221133.val[1])); float32x4_t _t_22 = vcombine_f32(vget_high_f32(_t01_00221133.val[0]), vget_high_f32(_t23_00221133.val[0])); float32x4_t _t_33 = vcombine_f32(vget_high_f32(_t01_00221133.val[1]), vget_high_f32(_t23_00221133.val[1])); float32x4_t _t_44 = vcombine_f32(vget_low_f32(_t01_44665577.val[0]), vget_low_f32(_t23_44665577.val[0])); float32x4_t _t_55 = vcombine_f32(vget_low_f32(_t01_44665577.val[1]), vget_low_f32(_t23_44665577.val[1])); float32x4_t _t_66 = vcombine_f32(vget_high_f32(_t01_44665577.val[0]), vget_high_f32(_t23_44665577.val[0])); float32x4_t _t_77 = vcombine_f32(vget_high_f32(_t01_44665577.val[1]), vget_high_f32(_t23_44665577.val[1])); float32x4_t _t_0_m_6 = vsubq_f32(_t_00, _t_66); float32x4_t _t_7_m_1 = vsubq_f32(_t_77, _t_11); float32x4_t _t_4_m_2 = vsubq_f32(_t_44, _t_22); float32x4_t _t_3_m_5 = vsubq_f32(_t_33, _t_55); float32x4_t _r0_tm_0_0 = vmlaq_lane_f32(_t_0_m_6, _t_4_m_2, vget_high_f32(_coeff1), 1); float32x4_t _r0_tm_4_3 = vmlaq_lane_f32(_t_7_m_1, _t_3_m_5, vget_high_f32(_coeff1), 1); r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_0, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_0, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_0, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_0, 3); r0_tm0 += img0_tm.w * tiles; r0_tm1 += img0_tm.w * tiles; r0_tm2 += img0_tm.w * tiles; r0_tm3 += img0_tm.w * tiles; float32x4_t _t_2_m_6 = vaddq_f32(_t_22, _t_66); float32x4_t _t_1_m_5 = vaddq_f32(_t_11, _t_55); float32x4_t _tmp12a = vmlsq_lane_f32(_t_2_m_6, _t_44, vget_high_f32(_coeff1), 0); 
float32x4_t _tmp12b = vmlsq_lane_f32(_t_1_m_5, _t_33, vget_high_f32(_coeff1), 0); float32x4_t _r0_tm_0_1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _r0_tm_0_2 = vsubq_f32(_tmp12a, _tmp12b); r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_1, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_1, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_1, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_1, 3); r0_tm0 += img0_tm.w * tiles; r0_tm1 += img0_tm.w * tiles; r0_tm2 += img0_tm.w * tiles; r0_tm3 += img0_tm.w * tiles; r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_2, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_2, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_2, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_2, 3); r0_tm0 += img0_tm.w * tiles; r0_tm1 += img0_tm.w * tiles; r0_tm2 += img0_tm.w * tiles; r0_tm3 += img0_tm.w * tiles; float32x4_t _t_4_x_c = vmulq_lane_f32(_t_44, vget_high_f32(_coeff0), 0); float32x4_t _t_3_x_c = vmulq_lane_f32(_t_33, vget_low_f32(_coeff1), 0); float32x4_t _tmp34a = vaddq_f32(_t_66, _t_4_x_c); _tmp34a = vmlaq_lane_f32(_tmp34a, _t_22, vget_low_f32(_coeff0), 0); float32x4_t _tmp34b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_low_f32(_coeff0), 1); _tmp34b = vmlaq_lane_f32(_tmp34b, _t_55, vget_high_f32(_coeff0), 1); float32x4_t _r0_tm_0_3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _r0_tm_4_0 = vsubq_f32(_tmp34a, _tmp34b); r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_3, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_3, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_3, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_3, 3); r0_tm0 += img0_tm.w * tiles; r0_tm1 += img0_tm.w * tiles; r0_tm2 += img0_tm.w * tiles; r0_tm3 += img0_tm.w * tiles; r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_0, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_0, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_0, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_0, 3); r0_tm0 += img0_tm.w * tiles; r0_tm1 += img0_tm.w * tiles; r0_tm2 += img0_tm.w * tiles; r0_tm3 += img0_tm.w * tiles; float32x4_t _t_2_a_4c = vaddq_f32(_t_22, _t_4_x_c); float32x4_t _tmp56a = vmlaq_lane_f32(_t_66, _t_2_a_4c, vget_low_f32(_coeff1), 1); 
float32x4_t _tmp56b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_high_f32(_coeff0), 1); _tmp56b = vmlaq_lane_f32(_tmp56b, _t_55, vget_low_f32(_coeff0), 1); float32x4_t _r0_tm_4_1 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _r0_tm_4_2 = vsubq_f32(_tmp56a, _tmp56b); r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_1, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_1, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_1, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_1, 3); r0_tm0 += img0_tm.w * tiles; r0_tm1 += img0_tm.w * tiles; r0_tm2 += img0_tm.w * tiles; r0_tm3 += img0_tm.w * tiles; r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_2, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_2, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_2, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_2, 3); r0_tm0 += img0_tm.w * tiles; r0_tm1 += img0_tm.w * tiles; r0_tm2 += img0_tm.w * tiles; r0_tm3 += img0_tm.w * tiles; r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_3, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_3, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_3, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_3, 3); t0 += 8 * 4; t1 += 8 * 4; t2 += 8 * 4; t3 += 8 * 4; r0_tm0 += img0_tm.w * tiles * 25; r0_tm1 += img0_tm.w * tiles * 25; r0_tm2 += img0_tm.w * tiles * 25; r0_tm3 += img0_tm.w * tiles * 25; } #else // __aarch64__ float* t0 = tmp[0]; float* t1 = tmp[1]; float* t2 = tmp[2]; float* t3 = tmp[3]; int stepw = w * 4 * 4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%4], %18 \n" "vld1.f32 {d20-d23}, [%5], %18 \n" "vld1.f32 {d24-d27}, [%6], %18 \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%7], %18 \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f17[1] \n" "vmul.f32 q7, q14, %e17[0] \n" // q7 = _r_3_x_c "vmul.f32 q6, q9, %f16[0] \n" // q6 = _r_4_x_c "vmls.f32 q4, q9, %f17[0] \n" "vmls.f32 q5, 
q14, %f17[0] \n" "vst1.f32 {d4-d5}, [%0] \n" // tmp[0][m] "add %0, %0, #128 \n" "vmov q3, q7 \n" // use q7 "vadd.f32 q2, q13, q6 \n" // use q6 "vmla.f32 q3, q10, %e16[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n" // use q7 "vadd.f32 q6, q12, q6 \n" // use q6 "vmla.f32 q5, q10, %f16[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e16[0] \n" "vmla.f32 q3, q11, %f16[1] \n" "vst1.f32 {d16-d17}, [%1] \n" // tmp[1][m] "add %1, %1, #128 \n" "vmla.f32 q4, q6, %e17[1] \n" "vmla.f32 q5, q11, %e16[1] \n" "vst1.f32 {d18-d19}, [%2] \n" // tmp[2][m] "add %2, %2, #128 \n" "vadd.f32 q8, q2, q3 \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16-d17}, [%3] \n" // tmp[3][m] "add %3, %3, #128 \n" "vst1.f32 {d18-d19}, [%0] \n" // tmp[4][m] "sub %0, %0, #112 \n" "vmla.f32 q6, q7, %f17[1] \n" "vst1.f32 {d4-d5}, [%1] \n" // tmp[5][m] "sub %1, %1, #112 \n" "vst1.f32 {d6-d7}, [%2] \n" // tmp[6][m] "sub %2, %2, #112 \n" "vst1.f32 {d12-d13}, [%3] \n" // tmp[7][m] "sub %3, %3, #112 \n" // loop1 "vld1.f32 {d16-d19}, [%4] \n" "vld1.f32 {d20-d23}, [%5] \n" "vld1.f32 {d24-d27}, [%6] \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%7] \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f17[1] \n" "vmul.f32 q7, q14, %e17[0] \n" // q7 = _r_3_x_c "vmul.f32 q6, q9, %f16[0] \n" // q6 = _r_4_x_c "vmls.f32 q4, q9, %f17[0] \n" "vmls.f32 q5, q14, %f17[0] \n" "vst1.f32 {d4-d5}, [%0] \n" // tmp[0][m] "add %0, %0, #128 \n" "vmov q3, q7 \n" // use q7 "vadd.f32 q2, q13, q6 \n" // use q6 "vmla.f32 q3, q10, %e16[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n" // use q7 "vadd.f32 
q6, q12, q6 \n" // use q6 "vmla.f32 q5, q10, %f16[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e16[0] \n" "vmla.f32 q3, q11, %f16[1] \n" "vst1.f32 {d16-d17}, [%1] \n" // tmp[1][m] "add %1, %1, #128 \n" "vmla.f32 q4, q6, %e17[1] \n" "vmla.f32 q5, q11, %e16[1] \n" "vst1.f32 {d18-d19}, [%2] \n" // tmp[2][m] "add %2, %2, #128 \n" "vadd.f32 q8, q2, q3 \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16-d17}, [%3] \n" // tmp[3][m] "add %3, %3, #128 \n" "vst1.f32 {d18-d19}, [%0] \n" // tmp[4][m] "vmla.f32 q6, q7, %f17[1] \n" "vst1.f32 {d4-d5}, [%1] \n" // tmp[5][m] "vst1.f32 {d6-d7}, [%2] \n" // tmp[6][m] "vst1.f32 {d12-d13}, [%3] \n" // tmp[7][m] : "=r"(t0), // %0 "=r"(t1), // %1 "=r"(t2), // %2 "=r"(t3), // %3 "=r"(r0), // %4 "=r"(r1), // %5 "=r"(r2), // %6 "=r"(r3) // %7 : "0"(t0), "1"(t1), "2"(t2), "3"(t3), "4"(r0), "5"(r1), "6"(r2), "7"(r3), "w"(_coeff0), // %16 "w"(_coeff1), // %17 "r"(stepw) // %18 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); t0 = tmp[0]; t1 = tmp[1]; t2 = tmp[2]; t3 = tmp[3]; float* r0_tm0_0 = img0_tm.row(i * w_tm / 8 + j); float* r0_tm1_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 8); float* r0_tm2_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 16); float* r0_tm3_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 24); int step = img0_tm.w * tiles * 4; int step2 = img0_tm.w * tiles * 25 * 4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%4] \n" "add %4, %4, #128 \n" "vld1.f32 {d20-d23}, [%5] \n" "add %5, %5, #128 \n" "vld1.f32 {d24-d27}, [%6] \n" "add %6, %6, #128 \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%7] \n" "add %7, %7, #128 \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" 
"vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f17[1] \n" "vmul.f32 q7, q14, %e17[0] \n" // q7 = _r_3_x_c "vmul.f32 q6, q9, %f16[0] \n" // q6 = _r_4_x_c "vmls.f32 q4, q9, %f17[0] \n" "vmls.f32 q5, q14, %f17[0] \n" "vst1.f32 {d4[0]}, [%0], %18 \n" "vst1.f32 {d4[1]}, [%1], %18 \n" "vmov q3, q7 \n" // use q7 "vst1.f32 {d5[0]}, [%2], %18 \n" "vst1.f32 {d5[1]}, [%3], %18 \n" "vadd.f32 q2, q13, q6 \n" // use q6 "vmla.f32 q3, q10, %e16[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n" // use q7 "vadd.f32 q6, q12, q6 \n" // use q6 "vmla.f32 q5, q10, %f16[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e16[0] \n" "vmla.f32 q3, q11, %f16[1] \n" "vst1.f32 {d16[0]}, [%0], %18 \n" "vst1.f32 {d16[1]}, [%1], %18 \n" "vmla.f32 q4, q6, %e17[1] \n" "vst1.f32 {d17[0]}, [%2], %18 \n" "vst1.f32 {d17[1]}, [%3], %18 \n" "vmla.f32 q5, q11, %e16[1] \n" "vst1.f32 {d18[0]}, [%0], %18 \n" "vst1.f32 {d18[1]}, [%1], %18 \n" "vadd.f32 q8, q2, q3 \n" "vst1.f32 {d19[0]}, [%2], %18 \n" "vst1.f32 {d19[1]}, [%3], %18 \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vst1.f32 {d16[0]}, [%0], %18 \n" "vst1.f32 {d16[1]}, [%1], %18 \n" "vst1.f32 {d17[0]}, [%2], %18 \n" "vst1.f32 {d17[1]}, [%3], %18 \n" "vadd.f32 q2, q4, q5 \n" "vst1.f32 {d18[0]}, [%0], %18 \n" "vst1.f32 {d18[1]}, [%1], %18 \n" "vst1.f32 {d19[0]}, [%2], %18 \n" "vst1.f32 {d19[1]}, [%3], %18 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d4[0]}, [%0], %18 \n" "vst1.f32 {d4[1]}, [%1], %18 \n" "vst1.f32 {d5[0]}, [%2], %18 \n" "vst1.f32 {d5[1]}, [%3], %18 \n" "vmla.f32 q6, q7, %f17[1] \n" "vst1.f32 {d6[0]}, [%0], %18 \n" "vst1.f32 {d6[1]}, [%1], %18 \n" "vst1.f32 {d7[0]}, [%2], %18 \n" "vst1.f32 {d7[1]}, [%3], %18 \n" "vst1.f32 {d12[0]}, [%0], %19 \n" "vst1.f32 {d12[1]}, [%1], %19 \n" "vst1.f32 {d13[0]}, [%2], %19 \n" "vst1.f32 {d13[1]}, [%3], %19 \n" // loop1 "vld1.f32 {d16-d19}, [%4] \n" "vld1.f32 {d20-d23}, [%5] \n" "vld1.f32 {d24-d27}, [%6] \n" "vtrn.32 q8, q10 \n" 
"vld1.f32 {d28-d31}, [%7] \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f17[1] \n" "vmul.f32 q7, q14, %e17[0] \n" // q7 = _r_3_x_c "vmul.f32 q6, q9, %f16[0] \n" // q6 = _r_4_x_c "vmls.f32 q4, q9, %f17[0] \n" "vmls.f32 q5, q14, %f17[0] \n" "vst1.f32 {d4[0]}, [%0], %18 \n" "vst1.f32 {d4[1]}, [%1], %18 \n" "vmov q3, q7 \n" // use q7 "vst1.f32 {d5[0]}, [%2], %18 \n" "vst1.f32 {d5[1]}, [%3], %18 \n" "vadd.f32 q2, q13, q6 \n" // use q6 "vmla.f32 q3, q10, %e16[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n" // use q7 "vadd.f32 q6, q12, q6 \n" // use q6 "vmla.f32 q5, q10, %f16[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e16[0] \n" "vmla.f32 q3, q11, %f16[1] \n" "vst1.f32 {d16[0]}, [%0], %18 \n" "vst1.f32 {d16[1]}, [%1], %18 \n" "vmla.f32 q4, q6, %e17[1] \n" "vst1.f32 {d17[0]}, [%2], %18 \n" "vst1.f32 {d17[1]}, [%3], %18 \n" "vmla.f32 q5, q11, %e16[1] \n" "vst1.f32 {d18[0]}, [%0], %18 \n" "vst1.f32 {d18[1]}, [%1], %18 \n" "vadd.f32 q8, q2, q3 \n" "vst1.f32 {d19[0]}, [%2], %18 \n" "vst1.f32 {d19[1]}, [%3], %18 \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vst1.f32 {d16[0]}, [%0], %18 \n" "vst1.f32 {d16[1]}, [%1], %18 \n" "vst1.f32 {d17[0]}, [%2], %18 \n" "vst1.f32 {d17[1]}, [%3], %18 \n" "vadd.f32 q2, q4, q5 \n" "vst1.f32 {d18[0]}, [%0], %18 \n" "vst1.f32 {d18[1]}, [%1], %18 \n" "vst1.f32 {d19[0]}, [%2], %18 \n" "vst1.f32 {d19[1]}, [%3], %18 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d4[0]}, [%0], %18 \n" "vst1.f32 {d4[1]}, [%1], %18 \n" "vst1.f32 {d5[0]}, [%2], %18 \n" "vst1.f32 {d5[1]}, [%3], %18 \n" "vmla.f32 q6, q7, %f17[1] \n" "vst1.f32 {d6[0]}, [%0], %18 \n" "vst1.f32 {d6[1]}, [%1], %18 \n" "vst1.f32 {d7[0]}, 
[%2], %18 \n" "vst1.f32 {d7[1]}, [%3], %18 \n" "vst1.f32 {d12[0]}, [%0] \n" "vst1.f32 {d12[1]}, [%1] \n" "vst1.f32 {d13[0]}, [%2] \n" "vst1.f32 {d13[1]}, [%3] \n" : "=r"(r0_tm0_0), // %0 "=r"(r0_tm1_0), // %1 "=r"(r0_tm2_0), // %2 "=r"(r0_tm3_0), // %3 "=r"(t0), // %4 "=r"(t1), // %5 "=r"(t2), // %6 "=r"(t3) // %7 : "0"(r0_tm0_0), "1"(r0_tm1_0), "2"(r0_tm2_0), "3"(r0_tm3_0), "4"(t0), "5"(t1), "6"(t2), "7"(t3), "w"(_coeff0), // %16 "w"(_coeff1), // %17 "r"(step), // %18 "r"(step2) // %19 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ #else const float* r0 = img0.row(i * 6) + j * 6; for (int m = 0; m < 8; m++) { tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25f; tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25f; float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25f); float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25f); tmp[1][m] = tmp12a + tmp12b; tmp[2][m] = tmp12a - tmp12b; float tmp34a = (r0[6] + r0[2] * 0.25f - r0[4] * 1.25f); float tmp34b = (r0[1] * 0.5f - r0[3] * 2.5f + r0[5] * 2.f); tmp[3][m] = tmp34a + tmp34b; tmp[4][m] = tmp34a - tmp34b; float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25f) * 4.f); float tmp56b = (r0[1] * 2.f - r0[3] * 2.5f + r0[5] * 0.5f); tmp[5][m] = tmp56a + tmp56b; tmp[6][m] = tmp56a - tmp56b; r0 += w; } float* r0_tm_0 = img0_tm.row(i * w_tm / 8 + j); float* r0_tm_1 = img0_tm.row(i * w_tm / 8 + j + tiles); float* r0_tm_2 = img0_tm.row(i * w_tm / 8 + j + tiles * 2); float* r0_tm_3 = img0_tm.row(i * w_tm / 8 + j + tiles * 3); float* r0_tm_4 = img0_tm.row(i * w_tm / 8 + j + tiles * 4); float* r0_tm_5 = img0_tm.row(i * w_tm / 8 + j + tiles * 5); float* r0_tm_6 = img0_tm.row(i * w_tm / 8 + j + tiles * 6); float* r0_tm_7 = img0_tm.row(i * w_tm / 8 + j + tiles * 7); for (int m = 0; m < 8; m++) { const float* tmp0 = tmp[m]; r0_tm_0[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25f; r0_tm_7[0] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25f; float tmp12a = (tmp0[2] + tmp0[6] - 
tmp0[4] * 4.25f); float tmp12b = (tmp0[1] - tmp0[3] * 4.25f + tmp0[5]); r0_tm_1[0] = tmp12a + tmp12b; r0_tm_2[0] = tmp12a - tmp12b; float tmp34a = (tmp0[6] + tmp0[2] * 0.25f - tmp0[4] * 1.25f); float tmp34b = (tmp0[1] * 0.5f - tmp0[3] * 2.5f + tmp0[5] * 2.f); r0_tm_3[0] = tmp34a + tmp34b; r0_tm_4[0] = tmp34a - tmp34b; float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25f) * 4.f); float tmp56b = (tmp0[1] * 2.f - tmp0[3] * 2.5f + tmp0[5] * 0.5f); r0_tm_5[0] = tmp56a + tmp56b; r0_tm_6[0] = tmp56a - tmp56b; r0_tm_0 += img0_tm.w * tiles * 8; r0_tm_1 += img0_tm.w * tiles * 8; r0_tm_2 += img0_tm.w * tiles * 8; r0_tm_3 += img0_tm.w * tiles * 8; r0_tm_4 += img0_tm.w * tiles * 8; r0_tm_5 += img0_tm.w * tiles * 8; r0_tm_6 += img0_tm.w * tiles * 8; r0_tm_7 += img0_tm.w * tiles * 8; } #endif // __ARM_NEON } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; // permute // bottom_blob_tm.create(1, 64 * tiles, inch); // Mat bottom_blob_tm2(inch, tiles, 64); Mat bottom_blob_tm2(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 4u, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 7 < tiles; i += 8) { float* tm2p = tm2.row(i / 8); const float* r0 = bottom_blob_tm; r0 += r * tiles + i; for (int q = 0; q < inch; q++) { #if __ARM_NEON float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0 + 4); vst1q_f32(tm2p, _r0); vst1q_f32(tm2p + 4, _r0n); #else tm2p[0] = r0[0]; tm2p[1] = r0[1]; tm2p[2] = r0[2]; tm2p[3] = r0[3]; tm2p[4] = r0[4]; tm2p[5] = r0[5]; tm2p[6] = r0[6]; tm2p[7] = r0[7]; #endif // __ARM_NEON r0 += bottom_blob_tm.cstep; tm2p += 8; } } for (; i + 3 < tiles; i += 4) { float* tm2p = tm2.row(i / 8 + (i % 8) / 4); const float* r0 = bottom_blob_tm; r0 += r * tiles + i; for (int q = 0; q < inch; q++) { #if 
__ARM_NEON float32x4_t _r0 = vld1q_f32(r0); vst1q_f32(tm2p, _r0); #else tm2p[0] = r0[0]; tm2p[1] = r0[1]; tm2p[2] = r0[2]; tm2p[3] = r0[3]; #endif // __ARM_NEON r0 += bottom_blob_tm.cstep; tm2p += 4; } } for (; i < tiles; i++) { float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + i % 4); const float* r0 = bottom_blob_tm; r0 += r * tiles + i; for (int q = 0; q < inch; q++) { tm2p[0] = r0[0]; r0 += bottom_blob_tm.cstep; tm2p += 1; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(1, 64 * tiles, outch); int nn_outch = 0; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; const Mat kernel_tm0 = kernel_tm.channel(p / 8); Mat out0_tm = top_blob_tm.channel(p); Mat out1_tm = top_blob_tm.channel(p + 1); Mat out2_tm = top_blob_tm.channel(p + 2); Mat out3_tm = top_blob_tm.channel(p + 3); Mat out4_tm = top_blob_tm.channel(p + 4); Mat out5_tm = top_blob_tm.channel(p + 5); Mat out6_tm = top_blob_tm.channel(p + 6); Mat out7_tm = top_blob_tm.channel(p + 7); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; float* output4_tm = out4_tm; float* output5_tm = out5_tm; float* output6_tm = out6_tm; float* output7_tm = out7_tm; for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 7 < tiles; i += 8) { const float* bb2p0 = bb2.row(i / 8); const float* ktm0 = kernel_tm0.row(r); asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, 
v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" // inch loop "lsr w4, %w20, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "fmla v18.4s, v8.4s, v0.s[1] \n" "fmla v19.4s, v9.4s, v0.s[1] \n" "fmla v20.4s, v8.4s, v0.s[2] \n" "fmla v21.4s, v9.4s, v0.s[2] \n" "fmla v22.4s, v8.4s, v0.s[3] \n" "fmla v23.4s, v9.4s, v0.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "fmla v24.4s, v8.4s, v1.s[0] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v26.4s, v8.4s, v1.s[1] \n" "fmla v27.4s, v9.4s, v1.s[1] \n" "fmla v28.4s, v8.4s, v1.s[2] \n" "fmla v29.4s, v9.4s, v1.s[2] \n" "fmla v30.4s, v8.4s, v1.s[3] \n" "fmla v31.4s, v9.4s, v1.s[3] \n" "fmla v16.4s, v10.4s, v2.s[0] \n" "fmla v17.4s, v11.4s, v2.s[0] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v11.4s, v2.s[1] \n" "fmla v20.4s, v10.4s, v2.s[2] \n" "fmla v21.4s, v11.4s, v2.s[2] \n" "fmla v22.4s, v10.4s, v2.s[3] \n" "fmla v23.4s, v11.4s, v2.s[3] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n" "fmla v24.4s, v10.4s, v3.s[0] \n" "fmla v25.4s, v11.4s, v3.s[0] \n" "fmla v26.4s, v10.4s, v3.s[1] \n" "fmla v27.4s, v11.4s, v3.s[1] \n" "fmla v28.4s, v10.4s, v3.s[2] \n" "fmla v29.4s, v11.4s, v3.s[2] \n" "fmla v30.4s, v10.4s, v3.s[3] \n" "fmla v31.4s, v11.4s, v3.s[3] \n" "fmla v16.4s, v12.4s, v4.s[0] \n" "fmla v17.4s, v13.4s, v4.s[0] \n" "fmla v18.4s, v12.4s, v4.s[1] \n" "fmla v19.4s, v13.4s, v4.s[1] \n" "fmla v20.4s, v12.4s, v4.s[2] \n" "fmla v21.4s, v13.4s, v4.s[2] \n" "fmla v22.4s, v12.4s, v4.s[3] \n" "fmla v23.4s, v13.4s, v4.s[3] \n" "fmla v24.4s, v12.4s, v5.s[0] \n" "fmla v25.4s, v13.4s, v5.s[0] \n" "fmla 
v26.4s, v12.4s, v5.s[1] \n" "fmla v27.4s, v13.4s, v5.s[1] \n" "fmla v28.4s, v12.4s, v5.s[2] \n" "fmla v29.4s, v13.4s, v5.s[2] \n" "fmla v30.4s, v12.4s, v5.s[3] \n" "fmla v31.4s, v13.4s, v5.s[3] \n" "fmla v16.4s, v14.4s, v6.s[0] \n" "fmla v17.4s, v15.4s, v6.s[0] \n" "fmla v18.4s, v14.4s, v6.s[1] \n" "fmla v19.4s, v15.4s, v6.s[1] \n" "fmla v20.4s, v14.4s, v6.s[2] \n" "fmla v21.4s, v15.4s, v6.s[2] \n" "fmla v22.4s, v14.4s, v6.s[3] \n" "fmla v23.4s, v15.4s, v6.s[3] \n" "subs w4, w4, #1 \n" "fmla v24.4s, v14.4s, v7.s[0] \n" "fmla v25.4s, v15.4s, v7.s[0] \n" "fmla v26.4s, v14.4s, v7.s[1] \n" "fmla v27.4s, v15.4s, v7.s[1] \n" "fmla v28.4s, v14.4s, v7.s[2] \n" "fmla v29.4s, v15.4s, v7.s[2] \n" "fmla v30.4s, v14.4s, v7.s[3] \n" "fmla v31.4s, v15.4s, v7.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w20, #3 \n" // w4 = remain = tiles & 3 "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%8, #256] \n" "ld1 {v8.4s, v9.4s}, [%8], #32 \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "fmla v18.4s, v8.4s, v0.s[1] \n" "fmla v19.4s, v9.4s, v0.s[1] \n" "fmla v20.4s, v8.4s, v0.s[2] \n" "fmla v21.4s, v9.4s, v0.s[2] \n" "fmla v22.4s, v8.4s, v0.s[3] \n" "fmla v23.4s, v9.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "fmla v24.4s, v8.4s, v1.s[0] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v26.4s, v8.4s, v1.s[1] \n" "fmla v27.4s, v9.4s, v1.s[1] \n" "fmla v28.4s, v8.4s, v1.s[2] \n" "fmla v29.4s, v9.4s, v1.s[2] \n" "fmla v30.4s, v8.4s, v1.s[3] \n" "fmla v31.4s, v9.4s, v1.s[3] \n" "bne 2b \n" "3: \n" "st1 {v16.4s, v17.4s}, [%0], #32 \n" "st1 {v18.4s, v19.4s}, [%1], #32 \n" "st1 {v20.4s, v21.4s}, [%2], #32 \n" "st1 {v22.4s, v23.4s}, [%3], #32 \n" "st1 {v24.4s, v25.4s}, [%4], #32 \n" "st1 {v26.4s, v27.4s}, [%5], #32 \n" "st1 {v28.4s, v29.4s}, [%6], #32 \n" "st1 {v30.4s, v31.4s}, [%7], #32 \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), 
// %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(bb2p0), // %8 "=r"(ktm0) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(bb2p0), "9"(ktm0), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const float* bb2p0 = bb2.row(i / 8 + (i % 8) / 4); const float* ktm0 = kernel_tm0.row(r); asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" // inch loop "lsr w4, %w20, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v0.s[1] \n" "fmla v18.4s, v8.4s, v0.s[2] \n" "fmla v19.4s, v8.4s, v0.s[3] \n" "fmla v20.4s, v8.4s, v1.s[0] \n" "fmla v21.4s, v8.4s, v1.s[1] \n" "fmla v22.4s, v8.4s, v1.s[2] \n" "fmla v23.4s, v8.4s, v1.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "fmla v16.4s, v9.4s, v2.s[0] \n" "fmla v17.4s, v9.4s, v2.s[1] \n" "fmla v18.4s, v9.4s, v2.s[2] \n" "fmla v19.4s, v9.4s, v2.s[3] \n" "fmla v20.4s, v9.4s, v3.s[0] \n" "fmla v21.4s, v9.4s, v3.s[1] \n" "fmla v22.4s, v9.4s, v3.s[2] \n" "fmla v23.4s, v9.4s, v3.s[3] \n" "fmla v16.4s, v10.4s, v4.s[0] \n" "fmla v17.4s, v10.4s, v4.s[1] \n" "fmla v18.4s, v10.4s, v4.s[2] \n" "fmla v19.4s, v10.4s, v4.s[3] \n" "fmla v20.4s, v10.4s, v5.s[0] \n" "fmla v21.4s, 
v10.4s, v5.s[1] \n" "fmla v22.4s, v10.4s, v5.s[2] \n" "fmla v23.4s, v10.4s, v5.s[3] \n" "subs w4, w4, #1 \n" "fmla v16.4s, v11.4s, v6.s[0] \n" "fmla v17.4s, v11.4s, v6.s[1] \n" "fmla v18.4s, v11.4s, v6.s[2] \n" "fmla v19.4s, v11.4s, v6.s[3] \n" "fmla v20.4s, v11.4s, v7.s[0] \n" "fmla v21.4s, v11.4s, v7.s[1] \n" "fmla v22.4s, v11.4s, v7.s[2] \n" "fmla v23.4s, v11.4s, v7.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w20, #3 \n" // w4 = remain = tiles & 3 "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v8.4s}, [%8], #16 \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v0.s[1] \n" "fmla v18.4s, v8.4s, v0.s[2] \n" "fmla v19.4s, v8.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "fmla v20.4s, v8.4s, v1.s[0] \n" "fmla v21.4s, v8.4s, v1.s[1] \n" "fmla v22.4s, v8.4s, v1.s[2] \n" "fmla v23.4s, v8.4s, v1.s[3] \n" "bne 2b \n" "3: \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "st1 {v20.4s}, [%4], #16 \n" "st1 {v21.4s}, [%5], #16 \n" "st1 {v22.4s}, [%6], #16 \n" "st1 {v23.4s}, [%7], #16 \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(bb2p0), // %8 "=r"(ktm0) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(bb2p0), "9"(ktm0), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i < tiles; i++) { const float* bb2p0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4); const float* ktm0 = kernel_tm0.row(r); float32x4_t _sum0123 = vdupq_n_f32(0.f); float32x4_t _sum4567 = vdupq_n_f32(0.f); int q = 0; for (; q + 3 < inch; q += 4) { // asm volatile("prfm 
pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :); float32x4_t _bb2p0 = vld1q_f32(bb2p0); bb2p0 += 4; // asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :); float32x4_t _ktm0 = vld1q_f32(ktm0 + 0); float32x4_t _ktm1 = vld1q_f32(ktm0 + 4); float32x4_t _ktm2 = vld1q_f32(ktm0 + 8); float32x4_t _ktm3 = vld1q_f32(ktm0 + 12); ktm0 += 16; _sum0123 = vmlaq_laneq_f32(_sum0123, _ktm0, _bb2p0, 0); _sum4567 = vmlaq_laneq_f32(_sum4567, _ktm1, _bb2p0, 0); _sum0123 = vmlaq_laneq_f32(_sum0123, _ktm2, _bb2p0, 1); _sum4567 = vmlaq_laneq_f32(_sum4567, _ktm3, _bb2p0, 1); // asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :); float32x4_t _ktm4 = vld1q_f32(ktm0 + 0); float32x4_t _ktm5 = vld1q_f32(ktm0 + 4); float32x4_t _ktm6 = vld1q_f32(ktm0 + 8); float32x4_t _ktm7 = vld1q_f32(ktm0 + 12); ktm0 += 16; _sum0123 = vmlaq_laneq_f32(_sum0123, _ktm4, _bb2p0, 2); _sum4567 = vmlaq_laneq_f32(_sum4567, _ktm5, _bb2p0, 2); _sum0123 = vmlaq_laneq_f32(_sum0123, _ktm6, _bb2p0, 3); _sum4567 = vmlaq_laneq_f32(_sum4567, _ktm7, _bb2p0, 3); } for (; q < inch; q++) { float32x4_t _bb2p0 = vld1q_dup_f32(bb2p0); float32x4_t _ktm0123 = vld1q_f32(ktm0 + 0); float32x4_t _ktm4567 = vld1q_f32(ktm0 + 4); _sum0123 = vmlaq_f32(_sum0123, _bb2p0, _ktm0123); _sum4567 = vmlaq_f32(_sum4567, _bb2p0, _ktm4567); bb2p0 += 1; ktm0 += 8; } float sum0 = vgetq_lane_f32(_sum0123, 0); float sum1 = vgetq_lane_f32(_sum0123, 1); float sum2 = vgetq_lane_f32(_sum0123, 2); float sum3 = vgetq_lane_f32(_sum0123, 3); float sum4 = vgetq_lane_f32(_sum4567, 0); float sum5 = vgetq_lane_f32(_sum4567, 1); float sum6 = vgetq_lane_f32(_sum4567, 2); float sum7 = vgetq_lane_f32(_sum4567, 3); output0_tm[0] = sum0; output1_tm[0] = sum1; output2_tm[0] = sum2; output3_tm[0] = sum3; output4_tm[0] = sum4; output5_tm[0] = sum5; output6_tm[0] = sum6; output7_tm[0] = sum7; output0_tm += 1; output1_tm += 1; output2_tm += 1; output3_tm += 1; output4_tm += 1; output5_tm += 1; output6_tm += 1; output7_tm += 1; } } } #endif // __aarch64__ nn_outch = 
(outch - remain_outch_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; #if __ARM_NEON && __aarch64__ const Mat kernel_tm0 = kernel_tm.channel(p / 8 + (p % 8) / 4); #else const Mat kernel_tm0 = kernel_tm.channel(p / 4); #endif Mat out0_tm = top_blob_tm.channel(p); Mat out1_tm = top_blob_tm.channel(p + 1); Mat out2_tm = top_blob_tm.channel(p + 2); Mat out3_tm = top_blob_tm.channel(p + 3); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 7 < tiles; i += 8) { const float* bb2p0 = bb2.row(i / 8); const float* ktm0 = kernel_tm0.row(r); #if __ARM_NEON #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" // inch loop "lsr w4, %w12, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "fmla v10.4s, v4.4s, v0.s[1] \n" "fmla v11.4s, v5.4s, v0.s[1] \n" "fmla v12.4s, v4.4s, v0.s[2] \n" "fmla v13.4s, v5.4s, v0.s[2] \n" "fmla v14.4s, v4.4s, v0.s[3] \n" "fmla v15.4s, v5.4s, v0.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v8.4s, v6.4s, v1.s[0] \n" "fmla v9.4s, v7.4s, v1.s[0] \n" "fmla v10.4s, v6.4s, v1.s[1] \n" "fmla v11.4s, v7.4s, v1.s[1] \n" "fmla v12.4s, v6.4s, v1.s[2] \n" "fmla v13.4s, v7.4s, v1.s[2] \n" "fmla v14.4s, v6.4s, v1.s[3] \n" "fmla v15.4s, v7.4s, v1.s[3] \n" "fmla v8.4s, 
v16.4s, v2.s[0] \n" "fmla v9.4s, v17.4s, v2.s[0] \n" "fmla v10.4s, v16.4s, v2.s[1] \n" "fmla v11.4s, v17.4s, v2.s[1] \n" "fmla v12.4s, v16.4s, v2.s[2] \n" "fmla v13.4s, v17.4s, v2.s[2] \n" "fmla v14.4s, v16.4s, v2.s[3] \n" "fmla v15.4s, v17.4s, v2.s[3] \n" "fmla v8.4s, v18.4s, v3.s[0] \n" "fmla v9.4s, v19.4s, v3.s[0] \n" "fmla v10.4s, v18.4s, v3.s[1] \n" "fmla v11.4s, v19.4s, v3.s[1] \n" "fmla v12.4s, v18.4s, v3.s[2] \n" "fmla v13.4s, v19.4s, v3.s[2] \n" "fmla v14.4s, v18.4s, v3.s[3] \n" "fmla v15.4s, v19.4s, v3.s[3] \n" "subs w4, w4, #1 \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w12, #3 \n" // w4 = remain = tiles & 3 "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v4.4s, v5.4s}, [%4], #32 \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "fmla v10.4s, v4.4s, v0.s[1] \n" "fmla v11.4s, v5.4s, v0.s[1] \n" "fmla v12.4s, v4.4s, v0.s[2] \n" "fmla v13.4s, v5.4s, v0.s[2] \n" "fmla v14.4s, v4.4s, v0.s[3] \n" "fmla v15.4s, v5.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "st1 {v8.4s, v9.4s}, [%0], #32 \n" "st1 {v10.4s, v11.4s}, [%1], #32 \n" "st1 {v12.4s, v13.4s}, [%2], #32 \n" "st1 {v14.4s, v15.4s}, [%3], #32 \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(bb2p0), // %4 "=r"(ktm0) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(bb2p0), "5"(ktm0), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); #else // __aarch64__ asm volatile( "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" "veor q10, q10, q10 \n" "veor q11, q11, q11 \n" "veor q12, q12, q12 \n" "veor q13, q13, q13 \n" "veor q14, q14, q14 \n" "veor q15, q15, q15 \n" // inch loop "lsr r4, %12, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} 
\n" // "vld1.f32 {d8-d11}, [%4 :128]! \n" // "vld1.f32 {d12-d15}, [%4 :128]! \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" // "vld1.f32 {d0-d3}, [%5 :128]! \n" // "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n" "vmla.f32 q11, q5, d0[1] \n" "vmla.f32 q12, q4, d1[0] \n" "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n" "vmla.f32 q15, q5, d1[1] \n" "vmla.f32 q8, q6, d2[0] \n" "vmla.f32 q9, q7, d2[0] \n" "vmla.f32 q10, q6, d2[1] \n" "vmla.f32 q11, q7, d2[1] \n" "vmla.f32 q12, q6, d3[0] \n" "vmla.f32 q13, q7, d3[0] \n" "vmla.f32 q14, q6, d3[1] \n" "vmla.f32 q15, q7, d3[1] \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%4 :128]! \n" // "vld1.f32 {d12-d15}, [%4 :128]! \n" "vmla.f32 q8, q4, d4[0] \n" "vmla.f32 q9, q5, d4[0] \n" "vmla.f32 q10, q4, d4[1] \n" "vmla.f32 q11, q5, d4[1] \n" "vmla.f32 q12, q4, d5[0] \n" "vmla.f32 q13, q5, d5[0] \n" "vmla.f32 q14, q4, d5[1] \n" "vmla.f32 q15, q5, d5[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q6, d6[0] \n" "vmla.f32 q9, q7, d6[0] \n" "vmla.f32 q10, q6, d6[1] \n" "vmla.f32 q11, q7, d6[1] \n" "vmla.f32 q12, q6, d7[0] \n" "vmla.f32 q13, q7, d7[0] \n" "vmla.f32 q14, q6, d7[1] \n" "vmla.f32 q15, q7, d7[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %12, #3 \n" // r4 = remain = tiles & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #256] \n" "vld1.f32 {d8-d11}, [%4 :128]! \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5 :128]! \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n" "vmla.f32 q11, q5, d0[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q12, q4, d1[0] \n" "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n" "vmla.f32 q15, q5, d1[1] \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d19}, [%0]! \n" "vst1.f32 {d20-d23}, [%1]! \n" "vst1.f32 {d24-d27}, [%2]! \n" "vst1.f32 {d28-d31}, [%3]! 
\n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(bb2p0), // %4 "=r"(ktm0) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(bb2p0), "5"(ktm0), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ #else float sum0_0 = 0.f; float sum0_1 = 0.f; float sum0_2 = 0.f; float sum0_3 = 0.f; float sum0_4 = 0.f; float sum0_5 = 0.f; float sum0_6 = 0.f; float sum0_7 = 0.f; float sum1_0 = 0.f; float sum1_1 = 0.f; float sum1_2 = 0.f; float sum1_3 = 0.f; float sum1_4 = 0.f; float sum1_5 = 0.f; float sum1_6 = 0.f; float sum1_7 = 0.f; float sum2_0 = 0.f; float sum2_1 = 0.f; float sum2_2 = 0.f; float sum2_3 = 0.f; float sum2_4 = 0.f; float sum2_5 = 0.f; float sum2_6 = 0.f; float sum2_7 = 0.f; float sum3_0 = 0.f; float sum3_1 = 0.f; float sum3_2 = 0.f; float sum3_3 = 0.f; float sum3_4 = 0.f; float sum3_5 = 0.f; float sum3_6 = 0.f; float sum3_7 = 0.f; for (int q = 0; q < inch; q++) { sum0_0 += bb2p0[0] * ktm0[0]; sum0_1 += bb2p0[1] * ktm0[0]; sum0_2 += bb2p0[2] * ktm0[0]; sum0_3 += bb2p0[3] * ktm0[0]; sum0_4 += bb2p0[4] * ktm0[0]; sum0_5 += bb2p0[5] * ktm0[0]; sum0_6 += bb2p0[6] * ktm0[0]; sum0_7 += bb2p0[7] * ktm0[0]; sum1_0 += bb2p0[0] * ktm0[1]; sum1_1 += bb2p0[1] * ktm0[1]; sum1_2 += bb2p0[2] * ktm0[1]; sum1_3 += bb2p0[3] * ktm0[1]; sum1_4 += bb2p0[4] * ktm0[1]; sum1_5 += bb2p0[5] * ktm0[1]; sum1_6 += bb2p0[6] * ktm0[1]; sum1_7 += bb2p0[7] * ktm0[1]; sum2_0 += bb2p0[0] * ktm0[2]; sum2_1 += bb2p0[1] * ktm0[2]; sum2_2 += bb2p0[2] * ktm0[2]; sum2_3 += bb2p0[3] * ktm0[2]; sum2_4 += bb2p0[4] * ktm0[2]; sum2_5 += bb2p0[5] * ktm0[2]; sum2_6 += bb2p0[6] * ktm0[2]; sum2_7 += bb2p0[7] * ktm0[2]; sum3_0 += bb2p0[0] * ktm0[3]; sum3_1 += bb2p0[1] * ktm0[3]; sum3_2 += bb2p0[2] * ktm0[3]; sum3_3 += bb2p0[3] * ktm0[3]; sum3_4 += bb2p0[4] * ktm0[3]; sum3_5 += bb2p0[5] * ktm0[3]; sum3_6 += bb2p0[6] * 
ktm0[3]; sum3_7 += bb2p0[7] * ktm0[3]; bb2p0 += 8; ktm0 += 4; } output0_tm[0] = sum0_0; output0_tm[1] = sum0_1; output0_tm[2] = sum0_2; output0_tm[3] = sum0_3; output0_tm[4] = sum0_4; output0_tm[5] = sum0_5; output0_tm[6] = sum0_6; output0_tm[7] = sum0_7; output1_tm[0] = sum1_0; output1_tm[1] = sum1_1; output1_tm[2] = sum1_2; output1_tm[3] = sum1_3; output1_tm[4] = sum1_4; output1_tm[5] = sum1_5; output1_tm[6] = sum1_6; output1_tm[7] = sum1_7; output2_tm[0] = sum2_0; output2_tm[1] = sum2_1; output2_tm[2] = sum2_2; output2_tm[3] = sum2_3; output2_tm[4] = sum2_4; output2_tm[5] = sum2_5; output2_tm[6] = sum2_6; output2_tm[7] = sum2_7; output3_tm[0] = sum3_0; output3_tm[1] = sum3_1; output3_tm[2] = sum3_2; output3_tm[3] = sum3_3; output3_tm[4] = sum3_4; output3_tm[5] = sum3_5; output3_tm[6] = sum3_6; output3_tm[7] = sum3_7; output0_tm += 8; output1_tm += 8; output2_tm += 8; output3_tm += 8; #endif // __ARM_NEON } for (; i + 3 < tiles; i += 4) { const float* bb2p0 = bb2.row(i / 8 + (i % 8) / 4); const float* ktm0 = kernel_tm0.row(r); #if __ARM_NEON #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" // inch loop "lsr w4, %w12, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v8.4s, v5.4s, v1.s[0] \n" "fmla v9.4s, v5.4s, v1.s[1] \n" "fmla v10.4s, v5.4s, v1.s[2] \n" "fmla v11.4s, v5.4s, v1.s[3] \n" "fmla v8.4s, v6.4s, v2.s[0] \n" "fmla v9.4s, v6.4s, v2.s[1] \n" "fmla v10.4s, v6.4s, v2.s[2] \n" "fmla v11.4s, v6.4s, v2.s[3] \n" "fmla v8.4s, v7.4s, v3.s[0] \n" "fmla v9.4s, v7.4s, v3.s[1] \n" "fmla v10.4s, v7.4s, v3.s[2] \n" "fmla v11.4s, v7.4s, v3.s[3] \n" 
"subs w4, w4, #1 \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w12, #3 \n" // w4 = remain = tiles & 3 "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v4.4s}, [%4], #16 \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "st1 {v8.4s}, [%0], #16 \n" "st1 {v9.4s}, [%1], #16 \n" "st1 {v10.4s}, [%2], #16 \n" "st1 {v11.4s}, [%3], #16 \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(bb2p0), // %4 "=r"(ktm0) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(bb2p0), "5"(ktm0), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); #else // __aarch64__ asm volatile( "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" "veor q10, q10, q10 \n" "veor q11, q11, q11 \n" // inch loop "lsr r4, %12, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%4 :128]! \n" // "vld1.f32 {d12-d15}, [%4 :128]! \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" // "vld1.f32 {d0-d3}, [%5 :128]! \n" // "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vmla.f32 q8, q5, d2[0] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q10, q5, d3[0] \n" "vmla.f32 q11, q5, d3[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q6, d4[0] \n" "vmla.f32 q9, q6, d4[1] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d5[1] \n" "vmla.f32 q8, q7, d6[0] \n" "vmla.f32 q9, q7, d6[1] \n" "vmla.f32 q10, q7, d7[0] \n" "vmla.f32 q11, q7, d7[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %12, #3 \n" // r4 = remain = tiles & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #128] \n" "vld1.f32 {d8-d9}, [%4 :128]! 
\n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5 :128]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d17}, [%0]! \n" "vst1.f32 {d18-d19}, [%1]! \n" "vst1.f32 {d20-d21}, [%2]! \n" "vst1.f32 {d22-d23}, [%3]! \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(bb2p0), // %4 "=r"(ktm0) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(bb2p0), "5"(ktm0), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif // __aarch64__ #else float sum0_0 = 0.f; float sum0_1 = 0.f; float sum0_2 = 0.f; float sum0_3 = 0.f; float sum1_0 = 0.f; float sum1_1 = 0.f; float sum1_2 = 0.f; float sum1_3 = 0.f; float sum2_0 = 0.f; float sum2_1 = 0.f; float sum2_2 = 0.f; float sum2_3 = 0.f; float sum3_0 = 0.f; float sum3_1 = 0.f; float sum3_2 = 0.f; float sum3_3 = 0.f; for (int q = 0; q < inch; q++) { sum0_0 += bb2p0[0] * ktm0[0]; sum0_1 += bb2p0[1] * ktm0[0]; sum0_2 += bb2p0[2] * ktm0[0]; sum0_3 += bb2p0[3] * ktm0[0]; sum1_0 += bb2p0[0] * ktm0[1]; sum1_1 += bb2p0[1] * ktm0[1]; sum1_2 += bb2p0[2] * ktm0[1]; sum1_3 += bb2p0[3] * ktm0[1]; sum2_0 += bb2p0[0] * ktm0[2]; sum2_1 += bb2p0[1] * ktm0[2]; sum2_2 += bb2p0[2] * ktm0[2]; sum2_3 += bb2p0[3] * ktm0[2]; sum3_0 += bb2p0[0] * ktm0[3]; sum3_1 += bb2p0[1] * ktm0[3]; sum3_2 += bb2p0[2] * ktm0[3]; sum3_3 += bb2p0[3] * ktm0[3]; bb2p0 += 4; ktm0 += 4; } output0_tm[0] = sum0_0; output0_tm[1] = sum0_1; output0_tm[2] = sum0_2; output0_tm[3] = sum0_3; output1_tm[0] = sum1_0; output1_tm[1] = sum1_1; output1_tm[2] = sum1_2; output1_tm[3] = sum1_3; output2_tm[0] = sum2_0; output2_tm[1] = sum2_1; output2_tm[2] = sum2_2; output2_tm[3] = sum2_3; output3_tm[0] = sum3_0; output3_tm[1] = sum3_1; output3_tm[2] = sum3_2; output3_tm[3] = sum3_3; output0_tm += 4; output1_tm += 4; output2_tm += 
4; output3_tm += 4; #endif // __ARM_NEON } for (; i < tiles; i++) { const float* bb2p0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4); const float* ktm0 = kernel_tm0.row(r); #if __ARM_NEON float32x4_t _sum0123 = vdupq_n_f32(0.f); int q = 0; for (; q + 3 < inch; q += 4) { // asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :); float32x4_t _bb2p0 = vld1q_f32(bb2p0); bb2p0 += 4; // asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :); float32x4_t _ktm0 = vld1q_f32(ktm0 + 0); float32x4_t _ktm1 = vld1q_f32(ktm0 + 4); float32x4_t _ktm2 = vld1q_f32(ktm0 + 8); float32x4_t _ktm3 = vld1q_f32(ktm0 + 12); ktm0 += 16; #if __aarch64__ _sum0123 = vmlaq_laneq_f32(_sum0123, _ktm0, _bb2p0, 0); _sum0123 = vmlaq_laneq_f32(_sum0123, _ktm1, _bb2p0, 1); _sum0123 = vmlaq_laneq_f32(_sum0123, _ktm2, _bb2p0, 2); _sum0123 = vmlaq_laneq_f32(_sum0123, _ktm3, _bb2p0, 3); #else _sum0123 = vmlaq_lane_f32(_sum0123, _ktm0, vget_low_f32(_bb2p0), 0); _sum0123 = vmlaq_lane_f32(_sum0123, _ktm1, vget_low_f32(_bb2p0), 1); _sum0123 = vmlaq_lane_f32(_sum0123, _ktm2, vget_high_f32(_bb2p0), 0); _sum0123 = vmlaq_lane_f32(_sum0123, _ktm3, vget_high_f32(_bb2p0), 1); #endif // __aarch64__ } for (; q < inch; q++) { float32x4_t _bb2p0 = vld1q_dup_f32(bb2p0); float32x4_t _ktm0 = vld1q_f32(ktm0); _sum0123 = vmlaq_f32(_sum0123, _bb2p0, _ktm0); bb2p0 += 1; ktm0 += 4; } float sum0 = vgetq_lane_f32(_sum0123, 0); float sum1 = vgetq_lane_f32(_sum0123, 1); float sum2 = vgetq_lane_f32(_sum0123, 2); float sum3 = vgetq_lane_f32(_sum0123, 3); #else float sum0 = 0.f; float sum1 = 0.f; float sum2 = 0.f; float sum3 = 0.f; for (int q = 0; q < inch; q++) { sum0 += bb2p0[0] * ktm0[0]; sum1 += bb2p0[0] * ktm0[1]; sum2 += bb2p0[0] * ktm0[2]; sum3 += bb2p0[0] * ktm0[3]; bb2p0 += 1; ktm0 += 4; } #endif // __ARM_NEON output0_tm[0] = sum0; output1_tm[0] = sum1; output2_tm[0] = sum2; output3_tm[0] = sum3; output0_tm += 1; output1_tm += 1; output2_tm += 1; output3_tm += 1; } } } remain_outch_start += nn_outch << 2; #pragma omp 
parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { #if __ARM_NEON && __aarch64__ const Mat kernel_tm0 = kernel_tm.channel(p / 8 + (p % 8) / 4 + p % 4); #else const Mat kernel_tm0 = kernel_tm.channel(p / 4 + p % 4); #endif Mat out0_tm = top_blob_tm.channel(p); float* output0_tm = out0_tm; for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 7 < tiles; i += 8) { const float* bb2p0 = bb2.row(i / 8); const float* ktm0 = kernel_tm0.row(r); #if __ARM_NEON #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" // inch loop "lsr w4, %w6, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "fmla v8.4s, v6.4s, v0.s[1] \n" "fmla v9.4s, v7.4s, v0.s[1] \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "fmla v8.4s, v12.4s, v0.s[2] \n" "fmla v9.4s, v13.4s, v0.s[2] \n" "fmla v8.4s, v14.4s, v0.s[3] \n" "fmla v9.4s, v15.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w6, #3 \n" // w4 = remain = tiles & 3 "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v4.4s, v5.4s}, [%1], #32 \n" "prfm pldl1keep, [%2, #32] \n" "ld1r {v0.4s}, [%2], #4 \n" "fmla v8.4s, v4.4s, v0.4s \n" "fmla v9.4s, v5.4s, v0.4s \n" "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "st1 {v8.4s, v9.4s}, [%0], #32 \n" : "=r"(output0_tm), // %0 "=r"(bb2p0), // %1 "=r"(ktm0) // %2 : "0"(output0_tm), "1"(bb2p0), "2"(ktm0), "r"(inch) // %6 : "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v12", "v13", "v14", "v15"); #else // __aarch64__ asm volatile( "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" // inch loop "lsr r4, %6, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" 
"0: \n" "pld [%1, #512] \n" "vldm %1!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%1 :128]! \n" // "vld1.f32 {d12-d15}, [%1 :128]! \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2 :128]! \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q8, q6, d0[1] \n" "vmla.f32 q9, q7, d0[1] \n" "pld [%1, #512] \n" "vldm %1!, {d24-d31} \n" // "vld1.f32 {d24-d27}, [%1 :128]! \n" // "vld1.f32 {d28-d31}, [%1 :128]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q12, d1[0] \n" "vmla.f32 q9, q13, d1[0] \n" "vmla.f32 q8, q14, d1[1] \n" "vmla.f32 q9, q15, d1[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %6, #3 \n" // r4 = remain = tiles & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%1, #256] \n" "vld1.f32 {d8-d11}, [%1 :128]! \n" "pld [%2, #32] \n" "vld1.f32 {d0[],d1[]}, [%2]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, q0 \n" "vmla.f32 q9, q5, q0 \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d19}, [%0]! \n" : "=r"(output0_tm), // %0 "=r"(bb2p0), // %1 "=r"(ktm0) // %2 : "0"(output0_tm), "1"(bb2p0), "2"(ktm0), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q12", "q13", "q14", "q15"); #endif // __aarch64__ #else float sum0 = 0.f; float sum1 = 0.f; float sum2 = 0.f; float sum3 = 0.f; float sum4 = 0.f; float sum5 = 0.f; float sum6 = 0.f; float sum7 = 0.f; for (int q = 0; q < inch; q++) { sum0 += bb2p0[0] * ktm0[0]; sum1 += bb2p0[1] * ktm0[0]; sum2 += bb2p0[2] * ktm0[0]; sum3 += bb2p0[3] * ktm0[0]; sum4 += bb2p0[4] * ktm0[0]; sum5 += bb2p0[5] * ktm0[0]; sum6 += bb2p0[6] * ktm0[0]; sum7 += bb2p0[7] * ktm0[0]; bb2p0 += 8; ktm0 += 1; } output0_tm[0] = sum0; output0_tm[1] = sum1; output0_tm[2] = sum2; output0_tm[3] = sum3; output0_tm[4] = sum4; output0_tm[5] = sum5; output0_tm[6] = sum6; output0_tm[7] = sum7; output0_tm += 8; #endif // __ARM_NEON } for (; i + 3 < tiles; i += 4) { const float* bb2p0 = bb2.row(i / 8 + (i % 8) / 4); const float* ktm0 = kernel_tm0.row(r); #if __ARM_NEON #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" // inch loop 
"lsr w4, %w6, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v8.4s, v5.4s, v0.s[1] \n" "fmla v8.4s, v6.4s, v0.s[2] \n" "fmla v8.4s, v7.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w6, #3 \n" // w4 = remain = tiles & 3 "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v4.4s}, [%4], #16 \n" "prfm pldl1keep, [%5, #32] \n" "ld1r {v0.4s}, [%5], #4 \n" "fmla v8.4s, v4.4s, v0.4s \n" "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "st1 {v8.4s}, [%0], #16 \n" : "=r"(output0_tm), // %0 "=r"(bb2p0), // %1 "=r"(ktm0) // %2 : "0"(output0_tm), "1"(bb2p0), "2"(ktm0), "r"(inch) // %6 : "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8"); #else // __aarch64__ asm volatile( "veor q8, q8, q8 \n" // inch loop "lsr r4, %6, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%4 :128]! \n" // "vld1.f32 {d12-d15}, [%4 :128]! \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5 :128]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q8, q7, d1[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %6, #3 \n" // r4 = remain = tiles & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #128] \n" "vld1.f32 {d8-d9}, [%4]! \n" "pld [%5, #32] \n" "vld1.f32 {d0[],d1[]}, [%5]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, q0 \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d17}, [%0]! 
\n" : "=r"(output0_tm), // %0 "=r"(bb2p0), // %1 "=r"(ktm0) // %2 : "0"(output0_tm), "1"(bb2p0), "2"(ktm0), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8"); #endif // __aarch64__ #else float sum0 = 0.f; float sum1 = 0.f; float sum2 = 0.f; float sum3 = 0.f; for (int q = 0; q < inch; q++) { sum0 += bb2p0[0] * ktm0[0]; sum1 += bb2p0[1] * ktm0[0]; sum2 += bb2p0[2] * ktm0[0]; sum3 += bb2p0[3] * ktm0[0]; bb2p0 += 4; ktm0 += 1; } output0_tm[0] = sum0; output0_tm[1] = sum1; output0_tm[2] = sum2; output0_tm[3] = sum3; output0_tm += 4; #endif // __ARM_NEON } for (; i < tiles; i++) { const float* bb2p0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4); const float* ktm0 = kernel_tm0.row(r); int q = 0; #if __ARM_NEON float32x4_t _sum0 = vdupq_n_f32(0.f); for (; q + 3 < inch; q += 4) { // asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :); float32x4_t _bb2p0 = vld1q_f32(bb2p0); bb2p0 += 4; float32x4_t _ktm0 = vld1q_f32(ktm0); ktm0 += 4; _sum0 = vmlaq_f32(_sum0, _bb2p0, _ktm0); } #if __aarch64__ float sum0 = vaddvq_f32(_sum0); #else float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float sum0 = vget_lane_f32(vpadd_f32(_ss0, _ss0), 0); #endif // __aarch64__ #else float sum0 = 0.f; #endif for (; q < inch; q++) { sum0 += bb2p0[0] * ktm0[0]; bb2p0 += 1; ktm0 += 1; } output0_tm[0] = sum0; output0_tm += 1; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 
+ r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) #if __ARM_NEON const float coeff[4] = {4.f, 8.f, 16.f, 32.f}; float32x4_t _coeff = vld1q_f32(coeff); #endif // __ARM_NEON int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); const float bias0 = bias ? bias[p] : 0.f; #if __ARM_NEON float32x2_t _bias0 = vdup_n_f32(bias0); #endif // __ARM_NEON float tmp[6][8]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { #if __ARM_NEON #if __aarch64__ const float* output0_tm0 = out0_tm.row(i * w_tm / 8 + j); const float* output0_tm1 = out0_tm.row(i * w_tm / 8 + j + tiles * 8); const float* output0_tm2 = out0_tm.row(i * w_tm / 8 + j + tiles * 16); const float* output0_tm3 = out0_tm.row(i * w_tm / 8 + j + tiles * 24); for (int m = 0; m + 3 < 8; m += 4) { float32x4_t _output0_tm_00 = {}; float32x4_t _output0_tm_11 = {}; float32x4_t _output0_tm_22 = {}; float32x4_t _output0_tm_33 = {}; float32x4_t _output0_tm_44 = {}; float32x4_t _output0_tm_55 = {}; float32x4_t _output0_tm_66 = {}; float32x4_t _output0_tm_77 = {}; _output0_tm_00 = vsetq_lane_f32(output0_tm0[0], _output0_tm_00, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_00 = vsetq_lane_f32(output0_tm1[0], _output0_tm_00, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_00 = vsetq_lane_f32(output0_tm2[0], _output0_tm_00, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_00 = vsetq_lane_f32(output0_tm3[0], _output0_tm_00, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_11 = vsetq_lane_f32(output0_tm0[0], _output0_tm_11, 0); output0_tm0 += out0_tm.w * tiles; 
_output0_tm_11 = vsetq_lane_f32(output0_tm1[0], _output0_tm_11, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_11 = vsetq_lane_f32(output0_tm2[0], _output0_tm_11, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_11 = vsetq_lane_f32(output0_tm3[0], _output0_tm_11, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_22 = vsetq_lane_f32(output0_tm0[0], _output0_tm_22, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_22 = vsetq_lane_f32(output0_tm1[0], _output0_tm_22, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_22 = vsetq_lane_f32(output0_tm2[0], _output0_tm_22, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_22 = vsetq_lane_f32(output0_tm3[0], _output0_tm_22, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_33 = vsetq_lane_f32(output0_tm0[0], _output0_tm_33, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_33 = vsetq_lane_f32(output0_tm1[0], _output0_tm_33, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_33 = vsetq_lane_f32(output0_tm2[0], _output0_tm_33, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_33 = vsetq_lane_f32(output0_tm3[0], _output0_tm_33, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_44 = vsetq_lane_f32(output0_tm0[0], _output0_tm_44, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_44 = vsetq_lane_f32(output0_tm1[0], _output0_tm_44, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_44 = vsetq_lane_f32(output0_tm2[0], _output0_tm_44, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_44 = vsetq_lane_f32(output0_tm3[0], _output0_tm_44, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_55 = vsetq_lane_f32(output0_tm0[0], _output0_tm_55, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_55 = vsetq_lane_f32(output0_tm1[0], _output0_tm_55, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_55 = vsetq_lane_f32(output0_tm2[0], _output0_tm_55, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_55 = vsetq_lane_f32(output0_tm3[0], _output0_tm_55, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_66 = vsetq_lane_f32(output0_tm0[0], 
_output0_tm_66, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_66 = vsetq_lane_f32(output0_tm1[0], _output0_tm_66, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_66 = vsetq_lane_f32(output0_tm2[0], _output0_tm_66, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_66 = vsetq_lane_f32(output0_tm3[0], _output0_tm_66, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_77 = vsetq_lane_f32(output0_tm0[0], _output0_tm_77, 0); _output0_tm_77 = vsetq_lane_f32(output0_tm1[0], _output0_tm_77, 1); _output0_tm_77 = vsetq_lane_f32(output0_tm2[0], _output0_tm_77, 2); _output0_tm_77 = vsetq_lane_f32(output0_tm3[0], _output0_tm_77, 3); float32x4_t _tmp024a = vaddq_f32(_output0_tm_11, _output0_tm_22); float32x4_t _tmp135a = vsubq_f32(_output0_tm_11, _output0_tm_22); float32x4_t _tmp024b = vaddq_f32(_output0_tm_33, _output0_tm_44); float32x4_t _tmp135b = vsubq_f32(_output0_tm_33, _output0_tm_44); float32x4_t _tmp024c = vaddq_f32(_output0_tm_55, _output0_tm_66); float32x4_t _tmp135c = vsubq_f32(_output0_tm_55, _output0_tm_66); float32x4_t _tmp0 = vaddq_f32(_output0_tm_00, _tmp024a); _tmp0 = vmlaq_lane_f32(_tmp0, _tmp024c, vget_high_f32(_coeff), 1); _tmp0 = vaddq_f32(_tmp0, _tmp024b); float32x4_t _tmp2 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0); _tmp2 = vmlaq_lane_f32(_tmp2, _tmp024c, vget_low_f32(_coeff), 1); float32x4_t _tmp4 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0); _tmp4 = vaddq_f32(_tmp4, _tmp024c); _tmp4 = vaddq_f32(_tmp4, _tmp024c); vst1q_f32(&tmp[0][m], _tmp0); vst1q_f32(&tmp[2][m], _tmp2); vst1q_f32(&tmp[4][m], _tmp4); float32x4_t _tmp1 = vmlaq_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0); _tmp1 = vaddq_f32(_tmp1, _tmp135b); _tmp1 = vaddq_f32(_tmp1, _tmp135b); float32x4_t _tmp3 = vmlaq_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1); _tmp3 = vmlaq_lane_f32(_tmp3, _tmp135c, vget_low_f32(_coeff), 0); float32x4_t _tmp5 = vaddq_f32(_output0_tm_77, _tmp135a); _tmp5 = vmlaq_lane_f32(_tmp5, _tmp135b, vget_high_f32(_coeff), 
1); _tmp5 = vaddq_f32(_tmp5, _tmp135c); vst1q_f32(&tmp[1][m], _tmp1); vst1q_f32(&tmp[3][m], _tmp3); vst1q_f32(&tmp[5][m], _tmp5); output0_tm0 += out0_tm.w * tiles * 25; output0_tm1 += out0_tm.w * tiles * 25; output0_tm2 += out0_tm.w * tiles * 25; output0_tm3 += out0_tm.w * tiles * 25; } const float* t0 = tmp[0]; const float* t1 = tmp[1]; float* output0 = out0.row(i * 6) + j * 6; float* output1 = output0 + outw; for (int m = 0; m + 1 < 6; m += 2) { float32x4_t _t0_0123 = vld1q_f32(t0); float32x4_t _t0_4567 = vld1q_f32(t0 + 4); float32x4_t _t1_0123 = vld1q_f32(t1); float32x4_t _t1_4567 = vld1q_f32(t1 + 4); float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123); float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567); float32x2_t _t_00 = vget_low_f32(_t01_00221133.val[0]); float32x2_t _t_11 = vget_low_f32(_t01_00221133.val[1]); float32x2_t _t_22 = vget_high_f32(_t01_00221133.val[0]); float32x2_t _t_33 = vget_high_f32(_t01_00221133.val[1]); float32x2_t _t_44 = vget_low_f32(_t01_44665577.val[0]); float32x2_t _t_55 = vget_low_f32(_t01_44665577.val[1]); float32x2_t _t_66 = vget_high_f32(_t01_44665577.val[0]); float32x2_t _t_77 = vget_high_f32(_t01_44665577.val[1]); float32x2_t _tmp024a = vadd_f32(_t_11, _t_22); float32x2_t _tmp135a = vsub_f32(_t_11, _t_22); float32x2_t _tmp024b = vadd_f32(_t_33, _t_44); float32x2_t _tmp135b = vsub_f32(_t_33, _t_44); float32x2_t _tmp024c = vadd_f32(_t_55, _t_66); float32x2_t _tmp135c = vsub_f32(_t_55, _t_66); float32x2_t _output_0 = vadd_f32(_t_00, _tmp024a); _output_0 = vmla_lane_f32(_output_0, _tmp024c, vget_high_f32(_coeff), 1); _output_0 = vadd_f32(_output_0, _tmp024b); _output_0 = vadd_f32(_output_0, _bias0); float32x2_t _output_2 = vmla_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0); _output_2 = vmla_lane_f32(_output_2, _tmp024c, vget_low_f32(_coeff), 1); _output_2 = vadd_f32(_output_2, _bias0); float32x2_t _output_4 = vmla_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0); _output_4 = vadd_f32(_output_4, 
_tmp024c); _output_4 = vadd_f32(_output_4, _tmp024c); _output_4 = vadd_f32(_output_4, _bias0); output0[0] = vget_lane_f32(_output_0, 0); output1[0] = vget_lane_f32(_output_0, 1); output0[2] = vget_lane_f32(_output_2, 0); output1[2] = vget_lane_f32(_output_2, 1); output0[4] = vget_lane_f32(_output_4, 0); output1[4] = vget_lane_f32(_output_4, 1); float32x2_t _output_1 = vmla_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0); _output_1 = vadd_f32(_output_1, _tmp135b); _output_1 = vadd_f32(_output_1, _tmp135b); _output_1 = vadd_f32(_output_1, _bias0); float32x2_t _output_3 = vmla_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1); _output_3 = vmla_lane_f32(_output_3, _tmp135c, vget_low_f32(_coeff), 0); _output_3 = vadd_f32(_output_3, _bias0); float32x2_t _output_5 = vadd_f32(_t_77, _tmp135a); _output_5 = vmla_lane_f32(_output_5, _tmp135b, vget_high_f32(_coeff), 1); _output_5 = vadd_f32(_output_5, _tmp135c); _output_5 = vadd_f32(_output_5, _bias0); output0[1] = vget_lane_f32(_output_1, 0); output1[1] = vget_lane_f32(_output_1, 1); output0[3] = vget_lane_f32(_output_3, 0); output1[3] = vget_lane_f32(_output_3, 1); output0[5] = vget_lane_f32(_output_5, 0); output1[5] = vget_lane_f32(_output_5, 1); t0 += 8 * 2; t1 += 8 * 2; output0 += outw * 2; output1 += outw * 2; } #else // __aarch64__ const float* output0_tm0_0 = out0_tm.row(i * w_tm / 8 + j); const float* output0_tm1_0 = out0_tm.row(i * w_tm / 8 + j + tiles * 8); const float* output0_tm2_0 = out0_tm.row(i * w_tm / 8 + j + tiles * 16); const float* output0_tm3_0 = out0_tm.row(i * w_tm / 8 + j + tiles * 24); float* t0 = tmp[0]; float* t1 = tmp[1]; int step = out0_tm.w * tiles * 4; int step2 = out0_tm.w * tiles * 25 * 4; asm volatile( // loop0 "vld1.f32 {d16[0]}, [%2], %13 \n" "vld1.f32 {d16[1]}, [%3], %13 \n" "vld1.f32 {d17[0]}, [%4], %13 \n" "vld1.f32 {d17[1]}, [%5], %13 \n" "vld1.f32 {d20[0]}, [%2], %13 \n" "vld1.f32 {d20[1]}, [%3], %13 \n" "vld1.f32 {d21[0]}, [%4], %13 \n" "vld1.f32 {d21[1]}, [%5], %13 \n" 
"vld1.f32 {d24[0]}, [%2], %13 \n" "vld1.f32 {d24[1]}, [%3], %13 \n" "vld1.f32 {d25[0]}, [%4], %13 \n" "vld1.f32 {d25[1]}, [%5], %13 \n" "vadd.f32 q2, q10, q12 \n" "vsub.f32 q3, q10, q12 \n" "vld1.f32 {d28[0]}, [%2], %13 \n" "vld1.f32 {d28[1]}, [%3], %13 \n" "vld1.f32 {d29[0]}, [%4], %13 \n" "vld1.f32 {d29[1]}, [%5], %13 \n" "vld1.f32 {d18[0]}, [%2], %13 \n" "vld1.f32 {d18[1]}, [%3], %13 \n" "vld1.f32 {d19[0]}, [%4], %13 \n" "vld1.f32 {d19[1]}, [%5], %13 \n" "vadd.f32 q4, q14, q9 \n" "vsub.f32 q5, q14, q9 \n" "vld1.f32 {d22[0]}, [%2], %13 \n" "vld1.f32 {d22[1]}, [%3], %13 \n" "vld1.f32 {d23[0]}, [%4], %13 \n" "vld1.f32 {d23[1]}, [%5], %13 \n" "vld1.f32 {d26[0]}, [%2], %13 \n" "vld1.f32 {d26[1]}, [%3], %13 \n" "vld1.f32 {d27[0]}, [%4], %13 \n" "vld1.f32 {d27[1]}, [%5], %13 \n" "vadd.f32 q6, q11, q13 \n" "vsub.f32 q7, q11, q13 \n" // spare q9 q10 q11 q12 q13 q14 "vld1.f32 {d30[0]}, [%2], %14 \n" "vld1.f32 {d30[1]}, [%3], %14 \n" "vld1.f32 {d31[0]}, [%4], %14 \n" "vld1.f32 {d31[1]}, [%5], %14 \n" "vmov q9, q3 \n" "vadd.f32 q8, q8, q2 \n" "vmla.f32 q9, q7, %f12[0] \n" "vmov q12, q2 \n" "vmov q10, q2 \n" "vmov q11, q3 \n" "vmla.f32 q12, q4, %f12[0] \n" "vadd.f32 q15, q15, q3 \n" "vmla.f32 q8, q6, %f12[1] \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q4, %e12[0] \n" "vmla.f32 q11, q5, %e12[1] \n" "vadd.f32 q12, q12, q6 \n" "vmla.f32 q15, q5, %f12[1] \n" "vadd.f32 q8, q8, q4 \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q6, %e12[1] \n" "vmla.f32 q11, q7, %e12[0] \n" "vadd.f32 q12, q12, q6 \n" "vadd.f32 q15, q15, q7 \n" "vst1.f32 {d16-d17}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d18-d19}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d20-d21}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d22-d23}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d24-d25}, [%0] \n" "sub %0, %0, #112 \n" "vst1.f32 {d30-d31}, [%1] \n" "sub %1, %1, #112 \n" // loop1 "vld1.f32 {d16[0]}, [%2], %13 \n" "vld1.f32 {d16[1]}, [%3], %13 \n" "vld1.f32 {d17[0]}, [%4], %13 \n" "vld1.f32 {d17[1]}, [%5], %13 \n" "vld1.f32 
{d20[0]}, [%2], %13 \n" "vld1.f32 {d20[1]}, [%3], %13 \n" "vld1.f32 {d21[0]}, [%4], %13 \n" "vld1.f32 {d21[1]}, [%5], %13 \n" "vld1.f32 {d24[0]}, [%2], %13 \n" "vld1.f32 {d24[1]}, [%3], %13 \n" "vld1.f32 {d25[0]}, [%4], %13 \n" "vld1.f32 {d25[1]}, [%5], %13 \n" "vadd.f32 q2, q10, q12 \n" "vsub.f32 q3, q10, q12 \n" "vld1.f32 {d28[0]}, [%2], %13 \n" "vld1.f32 {d28[1]}, [%3], %13 \n" "vld1.f32 {d29[0]}, [%4], %13 \n" "vld1.f32 {d29[1]}, [%5], %13 \n" "vld1.f32 {d18[0]}, [%2], %13 \n" "vld1.f32 {d18[1]}, [%3], %13 \n" "vld1.f32 {d19[0]}, [%4], %13 \n" "vld1.f32 {d19[1]}, [%5], %13 \n" "vadd.f32 q4, q14, q9 \n" "vsub.f32 q5, q14, q9 \n" "vld1.f32 {d22[0]}, [%2], %13 \n" "vld1.f32 {d22[1]}, [%3], %13 \n" "vld1.f32 {d23[0]}, [%4], %13 \n" "vld1.f32 {d23[1]}, [%5], %13 \n" "vld1.f32 {d26[0]}, [%2], %13 \n" "vld1.f32 {d26[1]}, [%3], %13 \n" "vld1.f32 {d27[0]}, [%4], %13 \n" "vld1.f32 {d27[1]}, [%5], %13 \n" "vadd.f32 q6, q11, q13 \n" "vsub.f32 q7, q11, q13 \n" // spare q9 q10 q11 q12 q13 q14 "vld1.f32 {d30[0]}, [%2] \n" "vld1.f32 {d30[1]}, [%3] \n" "vld1.f32 {d31[0]}, [%4] \n" "vld1.f32 {d31[1]}, [%5] \n" "vmov q9, q3 \n" "vadd.f32 q8, q8, q2 \n" "vmla.f32 q9, q7, %f12[0] \n" "vmov q12, q2 \n" "vmov q10, q2 \n" "vmov q11, q3 \n" "vmla.f32 q12, q4, %f12[0] \n" "vadd.f32 q15, q15, q3 \n" "vmla.f32 q8, q6, %f12[1] \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q4, %e12[0] \n" "vmla.f32 q11, q5, %e12[1] \n" "vadd.f32 q12, q12, q6 \n" "vmla.f32 q15, q5, %f12[1] \n" "vadd.f32 q8, q8, q4 \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q6, %e12[1] \n" "vmla.f32 q11, q7, %e12[0] \n" "vadd.f32 q12, q12, q6 \n" "vadd.f32 q15, q15, q7 \n" "vst1.f32 {d16-d17}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d18-d19}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d20-d21}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d22-d23}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d24-d25}, [%0] \n" "vst1.f32 {d30-d31}, [%1] \n" : "=r"(t0), // %0 "=r"(t1), // %1 "=r"(output0_tm0_0), // %2 "=r"(output0_tm1_0), // %3 
"=r"(output0_tm2_0), // %4 "=r"(output0_tm3_0) // %5 : "0"(t0), "1"(t1), "2"(output0_tm0_0), "3"(output0_tm1_0), "4"(output0_tm2_0), "5"(output0_tm3_0), "w"(_coeff), // %12 "r"(step), // %13 "r"(step2) // %14 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); t0 = tmp[0]; t1 = tmp[1]; float* output0 = out0.row(i * 6) + j * 6; float* output1 = output0 + outw; int stepw = outw * 2 * 4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%2] \n" "vld1.f32 {d20-d23}, [%3] \n" "add %2, %2, #64 \n" "add %3, %3, #64 \n" "vtrn.32 q8, q10 \n" // q8 = 0 2 q10 = 1 3 "vtrn.32 q9, q11 \n" // q9 = 4 6 q11 = 5 7 "vadd.f32 d4, d20, d17 \n" "vsub.f32 d5, d20, d17 \n" "vadd.f32 d6, d21, d18 \n" "vsub.f32 d7, d21, d18 \n" "vadd.f32 d8, d22, d19 \n" "vsub.f32 d9, d22, d19 \n" // spare d17 ~ d22 "vmov d20, d5 \n" "vmov d18, d4 \n" "vadd.f32 d16, d16, d4 \n" "vmla.f32 d20, d9, %f8[0] \n" "vmov d17, d4 \n" "vmov d21, d5 \n" "vmla.f32 d18, d6, %f8[0] \n" "vadd.f32 d22, d23, d5 \n" "vmla.f32 d16, d8, %f8[1] \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d6, %e8[0] \n" "vmla.f32 d21, d7, %e8[1] \n" "vadd.f32 d18, d18, d8 \n" "vmla.f32 d22, d7, %f8[1] \n" "vadd.f32 d16, d16, d6 \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d8, %e8[1] \n" "vmla.f32 d21, d9, %e8[0] \n" "vadd.f32 d18, d18, d8 \n" "vadd.f32 d22, d22, d9 \n" "vadd.f32 d16, d16, %P9 \n" // _bias0 "vadd.f32 d20, d20, %P9 \n" // _bias0 "vadd.f32 d17, d17, %P9 \n" // _bias0 "vadd.f32 d21, d21, %P9 \n" // _bias0 "vadd.f32 d18, d18, %P9 \n" // _bias0 "vadd.f32 d22, d22, %P9 \n" // _bias0 "vtrn.f32 q8, q10 \n" "vtrn.f32 d18, d22 \n" "vst1.f32 {d16-d18}, [%0], %10 \n" "vst1.f32 {d20-d22}, [%1], %10 \n" // loop1 "vld1.f32 {d16-d19}, [%2] \n" "vld1.f32 {d20-d23}, [%3] \n" "add %2, %2, #64 \n" "add %3, %3, #64 \n" "vtrn.32 q8, q10 \n" // q8 = 0 2 q10 = 1 3 "vtrn.32 q9, q11 \n" // q9 = 4 6 q11 = 5 7 "vadd.f32 d4, d20, d17 \n" "vsub.f32 d5, d20, d17 \n" "vadd.f32 d6, d21, d18 \n" "vsub.f32 d7, d21, 
d18 \n" "vadd.f32 d8, d22, d19 \n" "vsub.f32 d9, d22, d19 \n" // spare d17 ~ d22 "vmov d20, d5 \n" "vmov d18, d4 \n" "vadd.f32 d16, d16, d4 \n" "vmla.f32 d20, d9, %f8[0] \n" "vmov d17, d4 \n" "vmov d21, d5 \n" "vmla.f32 d18, d6, %f8[0] \n" "vadd.f32 d22, d23, d5 \n" "vmla.f32 d16, d8, %f8[1] \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d6, %e8[0] \n" "vmla.f32 d21, d7, %e8[1] \n" "vadd.f32 d18, d18, d8 \n" "vmla.f32 d22, d7, %f8[1] \n" "vadd.f32 d16, d16, d6 \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d8, %e8[1] \n" "vmla.f32 d21, d9, %e8[0] \n" "vadd.f32 d18, d18, d8 \n" "vadd.f32 d22, d22, d9 \n" "vadd.f32 d16, d16, %P9 \n" // _bias0 "vadd.f32 d20, d20, %P9 \n" // _bias0 "vadd.f32 d17, d17, %P9 \n" // _bias0 "vadd.f32 d21, d21, %P9 \n" // _bias0 "vadd.f32 d18, d18, %P9 \n" // _bias0 "vadd.f32 d22, d22, %P9 \n" // _bias0 "vtrn.f32 q8, q10 \n" "vtrn.f32 d18, d22 \n" "vst1.f32 {d16-d18}, [%0], %10 \n" "vst1.f32 {d20-d22}, [%1], %10 \n" // loop2 "vld1.f32 {d16-d19}, [%2] \n" "vld1.f32 {d20-d23}, [%3] \n" "add %2, %2, #64 \n" "add %3, %3, #64 \n" "vtrn.32 q8, q10 \n" // q8 = 0 2 q10 = 1 3 "vtrn.32 q9, q11 \n" // q9 = 4 6 q11 = 5 7 "vadd.f32 d4, d20, d17 \n" "vsub.f32 d5, d20, d17 \n" "vadd.f32 d6, d21, d18 \n" "vsub.f32 d7, d21, d18 \n" "vadd.f32 d8, d22, d19 \n" "vsub.f32 d9, d22, d19 \n" // spare d17 ~ d22 "vmov d20, d5 \n" "vmov d18, d4 \n" "vadd.f32 d16, d16, d4 \n" "vmla.f32 d20, d9, %f8[0] \n" "vmov d17, d4 \n" "vmov d21, d5 \n" "vmla.f32 d18, d6, %f8[0] \n" "vadd.f32 d22, d23, d5 \n" "vmla.f32 d16, d8, %f8[1] \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d6, %e8[0] \n" "vmla.f32 d21, d7, %e8[1] \n" "vadd.f32 d18, d18, d8 \n" "vmla.f32 d22, d7, %f8[1] \n" "vadd.f32 d16, d16, d6 \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d8, %e8[1] \n" "vmla.f32 d21, d9, %e8[0] \n" "vadd.f32 d18, d18, d8 \n" "vadd.f32 d22, d22, d9 \n" "vadd.f32 d16, d16, %P9 \n" // _bias0 "vadd.f32 d20, d20, %P9 \n" // _bias0 "vadd.f32 d17, d17, %P9 \n" // _bias0 "vadd.f32 d21, d21, %P9 \n" 
// _bias0 "vadd.f32 d18, d18, %P9 \n" // _bias0 "vadd.f32 d22, d22, %P9 \n" // _bias0 "vtrn.f32 q8, q10 \n" "vtrn.f32 d18, d22 \n" "vst1.f32 {d16-d18}, [%0], %10 \n" "vst1.f32 {d20-d22}, [%1], %10 \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(t0), // %2 "=r"(t1) // %3 : "0"(output0), "1"(output1), "2"(t0), "3"(t1), "w"(_coeff), // %8 "w"(_bias0), // %9 "r"(stepw) // %10 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ #else const float* output0_tm_0 = out0_tm.row(i * w_tm / 8 + j); const float* output0_tm_1 = out0_tm.row(i * w_tm / 8 + j + tiles); const float* output0_tm_2 = out0_tm.row(i * w_tm / 8 + j + tiles * 2); const float* output0_tm_3 = out0_tm.row(i * w_tm / 8 + j + tiles * 3); const float* output0_tm_4 = out0_tm.row(i * w_tm / 8 + j + tiles * 4); const float* output0_tm_5 = out0_tm.row(i * w_tm / 8 + j + tiles * 5); const float* output0_tm_6 = out0_tm.row(i * w_tm / 8 + j + tiles * 6); const float* output0_tm_7 = out0_tm.row(i * w_tm / 8 + j + tiles * 7); for (int m = 0; m < 8; m++) { float tmp024a = output0_tm_1[0] + output0_tm_2[0]; float tmp135a = output0_tm_1[0] - output0_tm_2[0]; float tmp024b = output0_tm_3[0] + output0_tm_4[0]; float tmp135b = output0_tm_3[0] - output0_tm_4[0]; float tmp024c = output0_tm_5[0] + output0_tm_6[0]; float tmp135c = output0_tm_5[0] - output0_tm_6[0]; tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32; tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; tmp[5][m] = output0_tm_7[0] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += out0_tm.w * tiles * 8; output0_tm_1 += out0_tm.w * tiles * 8; output0_tm_2 += out0_tm.w * tiles * 8; output0_tm_3 += out0_tm.w * tiles * 8; output0_tm_4 += out0_tm.w * tiles * 8; output0_tm_5 += out0_tm.w * tiles * 8; output0_tm_6 += out0_tm.w * 
tiles * 8;
                    output0_tm_7 += out0_tm.w * tiles * 8;
                }

                // Scalar inverse Winograd F(6x6,3x3) output transform:
                // collapse the 6x8 intermediate 'tmp' into the final 6x6 output tile.
                float* output0 = out0.row(i * 6) + j * 6;

                for (int m = 0; m < 6; m++)
                {
                    const float* tmp0 = tmp[m];

                    // Butterfly: sums feed the even outputs, differences the odd ones.
                    float tmp024a = tmp0[1] + tmp0[2];
                    float tmp135a = tmp0[1] - tmp0[2];

                    float tmp024b = tmp0[3] + tmp0[4];
                    float tmp135b = tmp0[3] - tmp0[4];

                    float tmp024c = tmp0[5] + tmp0[6];
                    float tmp135c = tmp0[5] - tmp0[6];

                    // Inverse-transform weights 1/2/4/8/16/32; bias is added here.
                    output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
                    output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
                    output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;

                    output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
                    output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
                    output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;

                    output0 += outw;
                }
#endif // __ARM_NEON
            }
        }
    }
    }
    // END transform output

    // cut result pad
    // top_blob_bordered was rounded up to a multiple of the 6x6 tile size;
    // trim the bottom/right padding off to the requested output size.
    if (top_blob_bordered.w != top_blob.w || top_blob_bordered.h != top_blob.h)
        copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}

// 3x3 convolution, stride 2, no dilation.
// Computes top_blob = conv(bottom_blob, _kernel) + _bias.
// Output channels are processed two at a time (sharing the input row loads),
// with a remainder loop for odd channel counts.  On ARM the inner loop emits
// 4 output pixels per iteration via inline NEON asm; a scalar fallback covers
// the non-NEON build and the trailing 0..3 pixels of each row.
static void conv3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // Stride 2: after the last output of a row, skip the unread remainder of
    // this input row plus one whole input row (rows advance by 2).
    const int tailstep = w - 2 * outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    int nn_outch = outch >> 1;              // number of channel pairs
    int remain_outch_start = nn_outch << 1; // first leftover channel

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 2;

        Mat out0 = top_blob.channel(p);
        Mat out1 = top_blob.channel(p + 1);

        const float bias0 = bias ? bias[p] : 0.f;
        const float bias1 = bias ? bias[p + 1] : 0.f;

        // Outputs start at the bias and accumulate one input channel per pass.
        out0.fill(bias0);
        out1.fill(bias1);

        const float* k0 = kernel + p * inch * 9;
        const float* k1 = kernel + (p + 1) * inch * 9;

        for (int q = 0; q < inch; q++)
        {
            float* outptr0 = out0;
            float* outptr1 = out1;

            const float* img0 = bottom_blob.channel(q);

            // Three consecutive input rows feeding one output row.
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w * 2;

#if __ARM_NEON
            // Kernel rows for both output channels; each vld1q reads 4 floats,
            // so lane 3 of each vector is a don't-care (handled in the tail loop).
            float32x4_t _k00 = vld1q_f32(k0);
            float32x4_t _k03 = vld1q_f32(k0 + 3);
            float32x4_t _k06 = vld1q_f32(k0 + 6);
            float32x4_t _k10 = vld1q_f32(k1);
            float32x4_t _k13 = vld1q_f32(k1 + 3);
            float32x4_t _k16 = vld1q_f32(k1 + 6);
#endif // __ARM_NEON

            int i = 0;

            for (; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;     // 4-pixel NEON iterations
                int remain = outw & 3;  // scalar tail
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                    // ld2 de-interleaves even/odd input columns (stride 2);
                    // ext builds the "+2" shifted column from the next pair.
                    asm volatile(
                        "prfm pldl1keep, [%3, #256] \n"
                        "ld2 {v8.4s, v9.4s}, [%3], #32 \n" // v8 v9 = r0

                        "0: \n"

                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v6.4s}, [%1] \n" // v6 = _sum0

                        "fmul v12.4s, v8.4s, %12.s[0] \n"

                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v7.4s}, [%2] \n" // v7 = _sum1

                        "fmul v13.4s, v8.4s, %15.s[0] \n"

                        "prfm pldl1keep, [%3, #128] \n"
                        "ld2 {v10.4s, v11.4s}, [%3] \n" // v10

                        "fmla v6.4s, v9.4s, %12.s[1] \n"

                        "ext v14.16b, v8.16b, v10.16b, #4\n"

                        "fmla v7.4s, v9.4s, %15.s[1] \n"

                        "prfm pldl1keep, [%4, #256] \n"
                        "ld2 {v8.4s, v9.4s}, [%4], #32 \n" // r1

                        "fmla v12.4s, v14.4s, %12.s[2] \n"
                        "fmla v13.4s, v14.4s, %15.s[2] \n"

                        "prfm pldl1keep, [%4, #128] \n"
                        "ld2 {v10.4s, v11.4s}, [%4] \n"

                        "fmla v6.4s, v8.4s, %13.s[0] \n"
                        "fmla v7.4s, v8.4s, %16.s[0] \n"

                        "ext v14.16b, v8.16b, v10.16b, #4\n"

                        "fmla v12.4s, v9.4s, %13.s[1] \n"
                        "fmla v13.4s, v9.4s, %16.s[1] \n"

                        "prfm pldl1keep, [%5, #256] \n"
                        "ld2 {v8.4s, v9.4s}, [%5], #32 \n" // r2

                        "fmla v6.4s, v14.4s, %13.s[2] \n"
                        "fmla v7.4s, v14.4s, %16.s[2] \n"

                        "prfm pldl1keep, [%5, #128] \n"
                        "ld2 {v10.4s, v11.4s}, [%5] \n"

                        "fmla v12.4s, v8.4s, %14.s[0] \n"
                        "fmla v13.4s, v8.4s, %17.s[0] \n"

                        "ext v14.16b, v8.16b, v10.16b, #4\n"

                        "fmla v6.4s, v9.4s, %14.s[1] \n"
                        "fmla v7.4s, v9.4s, %17.s[1] \n"

                        "fmla v12.4s, v14.4s, %14.s[2] \n"
                        "fmla v13.4s, v14.4s, %17.s[2] \n"

                        "prfm pldl1keep, [%3, #256] \n"
                        "ld2 {v8.4s, v9.4s}, [%3], #32 \n" // v8 v9 = r0

                        "fadd v6.4s, v6.4s, v12.4s \n"
                        "fadd v7.4s, v7.4s, v13.4s \n"

                        "subs %w0, %w0, #1 \n"

                        "st1 {v6.4s}, [%1], #16 \n"
                        "st1 {v7.4s}, [%2], #16 \n"

                        "bne 0b \n"
                        "sub %3, %3, #32 \n" // undo the pipelined r0 pre-load
                        : "=r"(nn),      // %0
                        "=r"(outptr0), // %1
                        "=r"(outptr1), // %2
                        "=r"(r0),      // %3
                        "=r"(r1),      // %4
                        "=r"(r2)       // %5
                        : "0"(nn),
                        "1"(outptr0),
                        "2"(outptr1),
                        "3"(r0),
                        "4"(r1),
                        "5"(r2),
                        "w"(_k00), // %12
                        "w"(_k03), // %13
                        "w"(_k06), // %14
                        "w"(_k10), // %15
                        "w"(_k13), // %16
                        "w"(_k16)  // %17
                        : "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
                }
#else
                if (nn > 0)
                {
                    // armv7 twin of the aarch64 loop above; vld2 de-interleaves
                    // even/odd columns, vext forms the "+2" shifted column.
                    asm volatile(
                        "pld [%3, #256] \n"
                        "vld2.f32 {d16-d19}, [%3]! \n" // q8 q9 = r0

                        "0: \n"

                        "pld [%1, #128] \n"
                        "vld1.f32 {d12-d13}, [%1] \n" // q6 = _sum0

                        "vmul.f32 q12, q8, %e12[0] \n"

                        "pld [%2, #128] \n"
                        "vld1.f32 {d14-d15}, [%2] \n" // q7 = _sum1

                        "vmul.f32 q13, q8, %e15[0] \n"

                        "pld [%3, #128] \n"
                        "vld2.f32 {d20-d21}, [%3] \n" // q10

                        "vmla.f32 q6, q9, %e12[1] \n"

                        "vext.32 q11, q8, q10, #1 \n"

                        "vmla.f32 q7, q9, %e15[1] \n"

                        "pld [%4, #256] \n"
                        "vld2.f32 {d16-d19}, [%4]! \n" // r1

                        "vmla.f32 q12, q11, %f12[0] \n"
                        "vmla.f32 q13, q11, %f15[0] \n"

                        "pld [%4, #128] \n"
                        "vld2.f32 {d20-d21}, [%4] \n"

                        "vmla.f32 q6, q8, %e13[0] \n"
                        "vmla.f32 q7, q8, %e16[0] \n"

                        "vext.32 q11, q8, q10, #1 \n"

                        "vmla.f32 q12, q9, %e13[1] \n"
                        "vmla.f32 q13, q9, %e16[1] \n"

                        "pld [%5, #256] \n"
                        "vld2.f32 {d16-d19}, [%5]! \n" // r2

                        "vmla.f32 q6, q11, %f13[0] \n"
                        "vmla.f32 q7, q11, %f16[0] \n"

                        "pld [%5, #128] \n"
                        "vld2.f32 {d20-d21}, [%5] \n"

                        "vmla.f32 q12, q8, %e14[0] \n"
                        "vmla.f32 q13, q8, %e17[0] \n"

                        "vext.32 q11, q8, q10, #1 \n"

                        "vmla.f32 q6, q9, %e14[1] \n"
                        "vmla.f32 q7, q9, %e17[1] \n"

                        "vmla.f32 q12, q11, %f14[0] \n"
                        "vmla.f32 q13, q11, %f17[0] \n"

                        "pld [%3, #256] \n"
                        "vld2.f32 {d16-d19}, [%3]! \n" // q8 q9 = r0

                        "vadd.f32 q6, q6, q12 \n"
                        "vadd.f32 q7, q7, q13 \n"

                        "subs %0, #1 \n"

                        "vst1.f32 {d12-d13}, [%1]! \n"
                        "vst1.f32 {d14-d15}, [%2]! \n"

                        "bne 0b \n"
                        "sub %3, #32 \n" // undo the pipelined r0 pre-load
                        : "=r"(nn),      // %0
                        "=r"(outptr0), // %1
                        "=r"(outptr1), // %2
                        "=r"(r0),      // %3
                        "=r"(r1),      // %4
                        "=r"(r2)       // %5
                        : "0"(nn),
                        "1"(outptr0),
                        "2"(outptr1),
                        "3"(r0),
                        "4"(r1),
                        "5"(r2),
                        "w"(_k00), // %12
                        "w"(_k03), // %13
                        "w"(_k06), // %14
                        "w"(_k10), // %15
                        "w"(_k13), // %16
                        "w"(_k16)  // %17
                        : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // Tail: one output pixel at a time.
                for (; remain > 0; remain--)
                {
#if __ARM_NEON
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r20 = vld1q_f32(r2);

                    float32x4_t _sum0 = vmulq_f32(_r00, _k00);
                    float32x4_t _sum1 = vmulq_f32(_r00, _k10);
                    _sum0 = vmlaq_f32(_sum0, _r10, _k03);
                    _sum1 = vmlaq_f32(_sum1, _r10, _k13);
                    _sum0 = vmlaq_f32(_sum0, _r20, _k06);
                    _sum1 = vmlaq_f32(_sum1, _r20, _k16);

                    // Lane 3 of each product is garbage (4-wide loads of 3-wide
                    // rows); overwrite it with the running output so the
                    // horizontal add yields previous-output + 3x3 dot product.
                    _sum0 = vsetq_lane_f32(*outptr0, _sum0, 3);
                    _sum1 = vsetq_lane_f32(*outptr1, _sum1, 3);
#if __aarch64__
                    *outptr0 = vaddvq_f32(_sum0);
                    *outptr1 = vaddvq_f32(_sum1);
#else
                    float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
                    float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1));

                    float32x2_t _ss01 = vpadd_f32(_ss0, _ss1);

                    *outptr0 = vget_lane_f32(_ss01, 0);
                    *outptr1 = vget_lane_f32(_ss01, 1);
#endif // __aarch64__
#else
                    float sum0 = 0.f;
                    float sum1 = 0.f;

                    sum0 += r0[0] * k0[0];
                    sum0 += r0[1] * k0[1];
                    sum0 += r0[2] * k0[2];
                    sum0 += r1[0] * k0[3];
                    sum0 += r1[1] * k0[4];
                    sum0 += r1[2] * k0[5];
                    sum0 += r2[0] * k0[6];
                    sum0 += r2[1] * k0[7];
                    sum0 += r2[2] * k0[8];

                    sum1 += r0[0] * k1[0];
                    sum1 += r0[1] * k1[1];
                    sum1 += r0[2] * k1[2];
                    sum1 += r1[0] * k1[3];
                    sum1 += r1[1] * k1[4];
                    sum1 += r1[2] * k1[5];
                    sum1 += r2[0] * k1[6];
                    sum1 += r2[1] * k1[7];
                    sum1 += r2[2] * k1[8];

                    *outptr0 += sum0;
                    *outptr1 += sum1;
#endif // __ARM_NEON

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr0++;
                    outptr1++;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }

            // advance to the next input channel's 3x3 kernels
            k0 += 9;
            k1 += 9;
        }
    }

    // Leftover output channel (odd outch): same algorithm, one channel wide.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = remain_outch_start; p < outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        const float* kernel0 = kernel + p * inch * 9;

        for (int q = 0; q < inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w * 2;

            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

#if __ARM_NEON
            // Three kernel rows; lane 3 of each is a don't-care.
            float32x4_t _k0123 = vld1q_f32(k0);
            float32x4_t _k3456 = vld1q_f32(k1);
            float32x4_t _k6789 = vld1q_f32(k2);
#endif // __ARM_NEON

            int i = 0;

            for (; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;
                int remain = outw & 3;
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                    // Single-channel variant of the paired loop above.
                    asm volatile(
                        "prfm pldl1keep, [%2, #256] \n"
                        "ld2 {v2.4s, v3.4s}, [%2], #32 \n"

                        "0: \n"

                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v0.4s}, [%1] \n"

                        "fmla v0.4s, v2.4s, %10.s[0] \n"
                        "fmul v10.4s, v3.4s, %10.s[1] \n"

                        "prfm pldl1keep, [%2, #256] \n"
                        "ld2 {v8.4s, v9.4s}, [%2] \n"

                        "ext v1.16b, v2.16b, v8.16b, #4 \n"

                        "fmul v11.4s, v1.4s, %10.s[2] \n"

                        "prfm pldl1keep, [%3, #256] \n"
                        "ld2 {v2.4s, v3.4s}, [%3], #32 \n"

                        "fmla v0.4s, v2.4s, %11.s[0] \n"
                        "fmla v10.4s, v3.4s, %11.s[1] \n"

                        "prfm pldl1keep, [%3, #256] \n"
                        "ld2 {v8.4s, v9.4s}, [%3] \n"

                        "ext v1.16b, v2.16b, v8.16b, #4 \n"

                        "fmla v11.4s, v1.4s, %11.s[2] \n"

                        "prfm pldl1keep, [%4, #256] \n"
                        "ld2 {v2.4s, v3.4s}, [%4], #32 \n"

                        "fmla v0.4s, v2.4s, %12.s[0] \n"
                        "fmla v10.4s, v3.4s, %12.s[1] \n"

                        "prfm pldl1keep, [%4, #256] \n"
                        "ld2 {v8.4s, v9.4s}, [%4] \n"

                        "ext v1.16b, v2.16b, v8.16b, #4 \n"

                        "fmla v11.4s, v1.4s, %12.s[2] \n"

                        "prfm pldl1keep, [%2, #256] \n"
                        "ld2 {v2.4s, v3.4s}, [%2], #32 \n"

                        "fadd v0.4s, v0.4s, v10.4s \n"
                        "fadd v0.4s, v0.4s, v11.4s \n"

                        "subs %w0, %w0, #1 \n"
                        "st1 {v0.4s}, [%1], #16 \n"
                        "bne 0b \n"
                        "sub %2, %2, #32 \n" // undo the pipelined r0 pre-load
                        : "=r"(nn),     // %0
                        "=r"(outptr), // %1
                        "=r"(r0),     // %2
                        "=r"(r1),     // %3
                        "=r"(r2)      // %4
                        : "0"(nn),
                        "1"(outptr),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2),
                        "w"(_k0123), // %10
                        "w"(_k3456), // %11
                        "w"(_k6789)  // %12
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
                }
#else
                if (nn > 0)
                {
                    // armv7 twin of the aarch64 single-channel loop.
                    asm volatile(
                        "pld [%2, #256] \n"
                        "vld2.f32 {d4-d7}, [%2]! \n"

                        "0: \n"

                        "pld [%1, #128] \n"
                        "vld1.f32 {d0-d1}, [%1] \n"

                        "vmla.f32 q0, q2, %e10[0] \n"
                        "vmul.f32 q10, q3, %e10[1] \n"

                        "pld [%2, #128] \n"
                        "vld2.f32 {d16-d17}, [%2] \n"

                        "vext.32 q1, q2, q8, #1 \n"

                        "vmul.f32 q11, q1, %f10[0] \n"

                        "pld [%3, #256] \n"
                        "vld2.f32 {d4-d7}, [%3]! \n"

                        "vmla.f32 q0, q2, %e11[0] \n"
                        "vmla.f32 q10, q3, %e11[1] \n"

                        "pld [%3, #128] \n"
                        "vld2.f32 {d16-d17}, [%3] \n"

                        "vext.32 q1, q2, q8, #1 \n"

                        "vmla.f32 q11, q1, %f11[0] \n"

                        "pld [%4, #256] \n"
                        "vld2.f32 {d4-d7}, [%4]! \n"

                        "vmla.f32 q0, q2, %e12[0] \n"
                        "vmla.f32 q10, q3, %e12[1] \n"

                        "pld [%4, #128] \n"
                        "vld2.f32 {d16-d17}, [%4] \n"

                        "vext.32 q1, q2, q8, #1 \n"

                        "vmla.f32 q11, q1, %f12[0] \n"

                        "pld [%2, #256] \n"
                        "vld2.f32 {d4-d7}, [%2]! \n"

                        "vadd.f32 q0, q0, q10 \n"
                        "vadd.f32 q0, q0, q11 \n"

                        "subs %0, #1 \n"
                        "vst1.f32 {d0-d1}, [%1]! \n"
                        "bne 0b \n"
                        "sub %2, #32 \n" // undo the pipelined r0 pre-load
                        : "=r"(nn),     // %0
                        "=r"(outptr), // %1
                        "=r"(r0),     // %2
                        "=r"(r1),     // %3
                        "=r"(r2)      // %4
                        : "0"(nn),
                        "1"(outptr),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2),
                        "w"(_k0123), // %10
                        "w"(_k3456), // %11
                        "w"(_k6789)  // %12
                        : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // Tail: one output pixel at a time.
                for (; remain > 0; remain--)
                {
#if __ARM_NEON
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r20 = vld1q_f32(r2);

                    float32x4_t _sum = vmulq_f32(_r00, _k0123);
                    _sum = vmlaq_f32(_sum, _r10, _k3456);
                    _sum = vmlaq_f32(_sum, _r20, _k6789);

                    // Replace the garbage lane 3 with the running output value
                    // before the horizontal reduction (see paired loop above).
                    _sum = vsetq_lane_f32(*outptr, _sum, 3);
#if __aarch64__
                    *outptr = vaddvq_f32(_sum);
#else
                    float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                    _ss = vpadd_f32(_ss, _ss);

                    *outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    *outptr += sum;
#endif // __ARM_NEON

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr++;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }

            kernel0 += 9;
        }
    }
}

// Repack 3x3 s2 kernels for the packed kernel below: groups of 8 output
// channels are interleaved element-wise (8 floats per kernel position) so the
// inner loop can load weights for 8 channels contiguously; leftover channels
// are stored unpacked, one 9-float kernel at a time.
static void conv3x3s2_transform_kernel_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    kernel_tm.create(8 * 9, inch, outch / 8 + outch % 8);

    const float* kernel = _kernel;

    int p = 0;
    for (; p + 7 < outch; p += 8)
    {
        const float* k0 = kernel + (p + 0) * inch * 9;
        const float* k1 = kernel + (p + 1) * inch * 9;
        const float* k2 = kernel + (p + 2) * inch * 9;
        const float* k3 = kernel + (p + 3) * inch * 9;
        const float* k4 = kernel + (p + 4) * inch * 9;
        const float* k5 = kernel + (p + 5) * inch * 9;
        const float* k6 = kernel + (p + 6) * inch * 9;
        const float* k7 = kernel + (p + 7) * inch * 9;

        float* ktmp = kernel_tm.channel(p / 8);

        for (int q = 0; q < inch; q++)
        {
            for (int k = 0; k < 9; k++)
            {
                // interleave the same kernel element across 8 output channels
                ktmp[0] = k0[k];
                ktmp[1] = k1[k];
                ktmp[2] =
k2[k];
                ktmp[3] = k3[k];
                ktmp[4] = k4[k];
                ktmp[5] = k5[k];
                ktmp[6] = k6[k];
                ktmp[7] = k7[k];

                ktmp += 8;
            }

            k0 += 9;
            k1 += 9;
            k2 += 9;
            k3 += 9;
            k4 += 9;
            k5 += 9;
            k6 += 9;
            k7 += 9;
        }
    }

    // Leftover output channels (outch % 8): stored unpacked, 9 floats each,
    // one kernel_tm channel per output channel past the packed groups.
    for (; p < outch; p++)
    {
        const float* k0 = kernel + (p + 0) * inch * 9;

        float* ktmp = kernel_tm.channel(p / 8 + p % 8);

        for (int q = 0; q < inch; q++)
        {
            for (int k = 0; k < 9; k++)
            {
                ktmp[k] = k0[k];
            }
            ktmp += 9;

            k0 += 9;
        }
    }
}

// 3x3 convolution, stride 2, using the kernel layout produced by
// conv3x3s2_transform_kernel_neon: output channels are computed 8 at a time
// with interleaved weights (_kernel here is the repacked kernel_tm, not the
// raw weights).
static void conv3x3s2_packed_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // Stride 2: skip the unread remainder of this input row plus one full row.
    const int tailstep = w - 2 * outw + w;

    // const float* kernel = _kernel;
    const float* bias = _bias;

    int nn_outch = outch >> 3;              // number of 8-channel groups
    int remain_outch_start = nn_outch << 3; // first leftover channel

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 8;

        Mat out0 = top_blob.channel(p + 0);
        Mat out1 = top_blob.channel(p + 1);
        Mat out2 = top_blob.channel(p + 2);
        Mat out3 = top_blob.channel(p + 3);
        Mat out4 = top_blob.channel(p + 4);
        Mat out5 = top_blob.channel(p + 5);
        Mat out6 = top_blob.channel(p + 6);
        Mat out7 = top_blob.channel(p + 7);

        const float bias0 = bias ? bias[p + 0] : 0.f;
        const float bias1 = bias ? bias[p + 1] : 0.f;
        const float bias2 = bias ? bias[p + 2] : 0.f;
        const float bias3 = bias ? bias[p + 3] : 0.f;
        const float bias4 = bias ? bias[p + 4] : 0.f;
        const float bias5 = bias ? bias[p + 5] : 0.f;
        const float bias6 = bias ? bias[p + 6] : 0.f;
        const float bias7 = bias ?
bias[p + 7] : 0.f; out0.fill(bias0); out1.fill(bias1); out2.fill(bias2); out3.fill(bias3); out4.fill(bias4); out5.fill(bias5); out6.fill(bias6); out7.fill(bias7); const float* ktmp = _kernel.channel(p / 8); for (int q = 0; q < inch; q++) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; float* outptr4 = out4; float* outptr5 = out5; float* outptr6 = out6; float* outptr7 = out7; const float* img0 = bottom_blob.channel(q); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v8.4s}, [%1] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v9.4s}, [%2] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v10.4s}, [%3] \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v11.4s}, [%4] \n" /// "prfm pldl1keep, [%9, #256] \n" "ld2 {v4.4s, v5.4s}, [%9], #32 \n" // v4=00 v5=01 "ld1 {v0.4s, v1.4s}, [%12], #32 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v12.4s}, [%5] \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v13.4s}, [%6] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v14.4s}, [%7] \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v15.4s}, [%8] \n" "ld1 {v2.4s, v3.4s}, [%12], #32 \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "prfm pldl1keep, [%9, #256] \n" "ld2 {v6.4s, v7.4s}, [%9] \n" // v6 "fmla v8.4s, v5.4s, v2.s[0] \n" "fmla v9.4s, v5.4s, v2.s[1] \n" "fmla v10.4s, v5.4s, v2.s[2] \n" "fmla v11.4s, v5.4s, v2.s[3] \n" "ext v6.16b, v4.16b, v6.16b, #4 \n" // v6=02 "ld1 {v0.4s, v1.4s}, [%12], #32 \n" "fmla v12.4s, v5.4s, v3.s[0] \n" "fmla v13.4s, v5.4s, v3.s[1] \n" "fmla v14.4s, v5.4s, v3.s[2] 
\n" "fmla v15.4s, v5.4s, v3.s[3] \n" /// "prfm pldl1keep, [%10, #256] \n" "ld2 {v4.4s, v5.4s}, [%10], #32 \n" // v4=10 v5=11 "fmla v8.4s, v6.4s, v0.s[0] \n" "fmla v9.4s, v6.4s, v0.s[1] \n" "fmla v10.4s, v6.4s, v0.s[2] \n" "fmla v11.4s, v6.4s, v0.s[3] \n" "ld1 {v2.4s, v3.4s}, [%12], #32 \n" "fmla v12.4s, v6.4s, v1.s[0] \n" "fmla v13.4s, v6.4s, v1.s[1] \n" "fmla v14.4s, v6.4s, v1.s[2] \n" "fmla v15.4s, v6.4s, v1.s[3] \n" "fmla v8.4s, v4.4s, v2.s[0] \n" "fmla v9.4s, v4.4s, v2.s[1] \n" "fmla v10.4s, v4.4s, v2.s[2] \n" "fmla v11.4s, v4.4s, v2.s[3] \n" "ld1 {v0.4s, v1.4s}, [%12], #32 \n" "fmla v12.4s, v4.4s, v3.s[0] \n" "fmla v13.4s, v4.4s, v3.s[1] \n" "fmla v14.4s, v4.4s, v3.s[2] \n" "fmla v15.4s, v4.4s, v3.s[3] \n" "prfm pldl1keep, [%10, #256] \n" "ld2 {v6.4s, v7.4s}, [%10] \n" // v6 "fmla v8.4s, v5.4s, v0.s[0] \n" "fmla v9.4s, v5.4s, v0.s[1] \n" "fmla v10.4s, v5.4s, v0.s[2] \n" "fmla v11.4s, v5.4s, v0.s[3] \n" "ld1 {v2.4s, v3.4s}, [%12], #32 \n" "ext v6.16b, v4.16b, v6.16b, #4 \n" // v6=12 "fmla v12.4s, v5.4s, v1.s[0] \n" "fmla v13.4s, v5.4s, v1.s[1] \n" "fmla v14.4s, v5.4s, v1.s[2] \n" "fmla v15.4s, v5.4s, v1.s[3] \n" /// "prfm pldl1keep, [%11, #256] \n" "ld2 {v4.4s, v5.4s}, [%11], #32 \n" // v4=20 v5=21 "fmla v8.4s, v6.4s, v2.s[0] \n" "fmla v9.4s, v6.4s, v2.s[1] \n" "fmla v10.4s, v6.4s, v2.s[2] \n" "fmla v11.4s, v6.4s, v2.s[3] \n" "ld1 {v0.4s, v1.4s}, [%12], #32 \n" "fmla v12.4s, v6.4s, v3.s[0] \n" "fmla v13.4s, v6.4s, v3.s[1] \n" "fmla v14.4s, v6.4s, v3.s[2] \n" "fmla v15.4s, v6.4s, v3.s[3] \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "ld1 {v2.4s, v3.4s}, [%12], #32 \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "prfm pldl1keep, [%11, #256] \n" "ld2 {v6.4s, v7.4s}, [%11] \n" // v6 "fmla v8.4s, v5.4s, v2.s[0] \n" "fmla v9.4s, v5.4s, v2.s[1] \n" "fmla v10.4s, v5.4s, v2.s[2] \n" "fmla v11.4s, 
v5.4s, v2.s[3] \n" "ext v6.16b, v4.16b, v6.16b, #4 \n" // v6=22 "ld1 {v0.4s, v1.4s}, [%12], #32 \n" "fmla v12.4s, v5.4s, v3.s[0] \n" "fmla v13.4s, v5.4s, v3.s[1] \n" "fmla v14.4s, v5.4s, v3.s[2] \n" "fmla v15.4s, v5.4s, v3.s[3] \n" "fmla v8.4s, v6.4s, v0.s[0] \n" "fmla v9.4s, v6.4s, v0.s[1] \n" "fmla v10.4s, v6.4s, v0.s[2] \n" "fmla v11.4s, v6.4s, v0.s[3] \n" "fmla v12.4s, v6.4s, v1.s[0] \n" "fmla v13.4s, v6.4s, v1.s[1] \n" "st1 {v8.4s}, [%1], #16 \n" "st1 {v9.4s}, [%2], #16 \n" "fmla v14.4s, v6.4s, v1.s[2] \n" "fmla v15.4s, v6.4s, v1.s[3] \n" "st1 {v10.4s}, [%3], #16 \n" "st1 {v11.4s}, [%4], #16 \n" "sub %12, %12, #288 \n" "st1 {v12.4s}, [%5], #16 \n" "st1 {v13.4s}, [%6], #16 \n" "subs %w0, %w0, #1 \n" "st1 {v14.4s}, [%7], #16 \n" "st1 {v15.4s}, [%8], #16 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(r0), // %9 "=r"(r1), // %10 "=r"(r2), // %11 "=r"(ktmp) // %12 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(r0), "10"(r1), "11"(r2), "12"(ktmp) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); } #else // __aarch64__ for (; nn > 0; nn--) { asm volatile( "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0] \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1] \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2] \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3] \n" /// "pld [%8, #256] \n" "vld2.f32 {d8-d11}, [%8]! \n" // q4=00 q5=01 "vld1.f32 {d0-d3}, [%11 :128]! 
\n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4] \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "pld [%6, #128] \n" "vld1.f32 {d28-d29}, [%6] \n" "pld [%7, #128] \n" "vld1.f32 {d30-d31}, [%7] \n" "vld1.f32 {d4-d7}, [%11 :128]! \n" "vmla.f32 q12, q4, d2[0] \n" "vmla.f32 q13, q4, d2[1] \n" "vmla.f32 q14, q4, d3[0] \n" "vmla.f32 q15, q4, d3[1] \n" "pld [%8, #128] \n" "vld2.f32 {d12-d13}, [%8] \n" // q6 "vmla.f32 q8, q5, d4[0] \n" "vmla.f32 q9, q5, d4[1] \n" "vmla.f32 q10, q5, d5[0] \n" "vmla.f32 q11, q5, d5[1] \n" "vext.f32 q6, q4, q6, #1 \n" // q6=02 "vld1.f32 {d0-d3}, [%11 :128]! \n" "vmla.f32 q12, q5, d6[0] \n" "vmla.f32 q13, q5, d6[1] \n" "vmla.f32 q14, q5, d7[0] \n" "vmla.f32 q15, q5, d7[1] \n" /// "pld [%9, #256] \n" "vld2.f32 {d8-d11}, [%9]! \n" // q4=10 q5=11 "vmla.f32 q8, q6, d0[0] \n" "vmla.f32 q9, q6, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q6, d1[1] \n" "vld1.f32 {d4-d7}, [%11 :128]! \n" "vmla.f32 q12, q6, d2[0] \n" "vmla.f32 q13, q6, d2[1] \n" "vmla.f32 q14, q6, d3[0] \n" "vmla.f32 q15, q6, d3[1] \n" "vmla.f32 q8, q4, d4[0] \n" "vmla.f32 q9, q4, d4[1] \n" "vmla.f32 q10, q4, d5[0] \n" "vmla.f32 q11, q4, d5[1] \n" "vld1.f32 {d0-d3}, [%11 :128]! \n" "vmla.f32 q12, q4, d6[0] \n" "vmla.f32 q13, q4, d6[1] \n" "vmla.f32 q14, q4, d7[0] \n" "vmla.f32 q15, q4, d7[1] \n" "pld [%9, #128] \n" "vld2.f32 {d12-d13}, [%9] \n" // q6 "vmla.f32 q8, q5, d0[0] \n" "vmla.f32 q9, q5, d0[1] \n" "vmla.f32 q10, q5, d1[0] \n" "vmla.f32 q11, q5, d1[1] \n" "vld1.f32 {d4-d7}, [%11 :128]! \n" "vext.f32 q6, q4, q6, #1 \n" // q6=12 "vmla.f32 q12, q5, d2[0] \n" "vmla.f32 q13, q5, d2[1] \n" "vmla.f32 q14, q5, d3[0] \n" "vmla.f32 q15, q5, d3[1] \n" /// "pld [%10, #256] \n" "vld2.f32 {d8-d11}, [%10]! 
\n" // q4=20 q5=21 "vmla.f32 q8, q6, d4[0] \n" "vmla.f32 q9, q6, d4[1] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d5[1] \n" "vld1.f32 {d0-d3}, [%11 :128]! \n" "vmla.f32 q12, q6, d6[0] \n" "vmla.f32 q13, q6, d6[1] \n" "vmla.f32 q14, q6, d7[0] \n" "vmla.f32 q15, q6, d7[1] \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vld1.f32 {d4-d7}, [%11 :128]! \n" "vmla.f32 q12, q4, d2[0] \n" "vmla.f32 q13, q4, d2[1] \n" "vmla.f32 q14, q4, d3[0] \n" "vmla.f32 q15, q4, d3[1] \n" "pld [%10, #128] \n" "vld2.f32 {d12-d13}, [%10] \n" // q6 "vmla.f32 q8, q5, d4[0] \n" "vmla.f32 q9, q5, d4[1] \n" "vmla.f32 q10, q5, d5[0] \n" "vmla.f32 q11, q5, d5[1] \n" "vext.f32 q6, q4, q6, #1 \n" // q6=22 "vld1.f32 {d0-d3}, [%11 :128]! \n" "vmla.f32 q12, q5, d6[0] \n" "vmla.f32 q13, q5, d6[1] \n" "vmla.f32 q14, q5, d7[0] \n" "vmla.f32 q15, q5, d7[1] \n" "vmla.f32 q8, q6, d0[0] \n" "vmla.f32 q9, q6, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q6, d1[1] \n" "vmla.f32 q12, q6, d2[0] \n" "vmla.f32 q13, q6, d2[1] \n" "vst1.f32 {d16-d17}, [%0]! \n" "vst1.f32 {d18-d19}, [%1]! \n" "vmla.f32 q14, q6, d3[0] \n" "vmla.f32 q15, q6, d3[1] \n" "vst1.f32 {d20-d21}, [%2]! \n" "vst1.f32 {d22-d23}, [%3]! \n" "sub %11, %11, #288 \n" "vst1.f32 {d24-d25}, [%4]! \n" "vst1.f32 {d26-d27}, [%5]! \n" "vst1.f32 {d28-d29}, [%6]! \n" "vst1.f32 {d30-d31}, [%7]! 
\n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(r0), // %8 "=r"(r1), // %9 "=r"(r2), // %10 "=r"(ktmp) // %11 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(r0), "9"(r1), "10"(r2), "11"(ktmp) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v10.4s, v11.4s}, [%11], #32 \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v0.4s}, [%8] \n" "ld1 {v12.4s, v13.4s}, [%11], #32 \n" "ld1 {v8.s}[0], [%0] \n" "ld1 {v8.s}[1], [%1] \n" "ld1 {v8.s}[2], [%2] \n" "ld1 {v8.s}[3], [%3] \n" "fmul v14.4s, v10.4s, v0.s[0] \n" "fmul v15.4s, v11.4s, v0.s[0] \n" "ld1 {v9.s}[0], [%4] \n" "ld1 {v9.s}[1], [%5] \n" "ld1 {v9.s}[2], [%6] \n" "ld1 {v9.s}[3], [%7] \n" "ld1 {v10.4s, v11.4s}, [%11], #32 \n" "fmla v8.4s, v12.4s, v0.s[1] \n" "fmla v9.4s, v13.4s, v0.s[1] \n" "ld1 {v12.4s, v13.4s}, [%11], #32 \n" "fmla v14.4s, v10.4s, v0.s[2] \n" "fmla v15.4s, v11.4s, v0.s[2] \n" "prfm pldl1keep, [%9, #128] \n" "ld1 {v1.4s}, [%9] \n" "ld1 {v10.4s, v11.4s}, [%11], #32 \n" "fmla v8.4s, v12.4s, v1.s[0] \n" "fmla v9.4s, v13.4s, v1.s[0] \n" "ld1 {v12.4s, v13.4s}, [%11], #32 \n" "fmla v14.4s, v10.4s, v1.s[1] \n" "fmla v15.4s, v11.4s, v1.s[1] \n" "ld1 {v10.4s, v11.4s}, [%11], #32 \n" "fmla v8.4s, v12.4s, v1.s[2] \n" "fmla v9.4s, v13.4s, v1.s[2] \n" "prfm pldl1keep, [%10, #128] \n" "ld1 {v0.4s}, [%10] \n" "ld1 {v12.4s, v13.4s}, [%11], #32 \n" "fmla v14.4s, v10.4s, v0.s[0] \n" "fmla v15.4s, v11.4s, v0.s[0] \n" "ld1 {v10.4s, v11.4s}, [%11], #32 \n" "fmla v8.4s, v12.4s, v0.s[1] \n" "fmla v9.4s, v13.4s, v0.s[1] \n" "fmla v14.4s, v10.4s, v0.s[2] \n" "fmla v15.4s, v11.4s, v0.s[2] \n" "fadd v8.4s, v8.4s, v14.4s \n" "fadd v9.4s, v9.4s, 
v15.4s \n" "sub %11, %11, #288 \n" "st1 {v8.s}[0], [%0], #4 \n" "st1 {v8.s}[1], [%1], #4 \n" "st1 {v8.s}[2], [%2], #4 \n" "st1 {v8.s}[3], [%3], #4 \n" "st1 {v9.s}[0], [%4], #4 \n" "st1 {v9.s}[1], [%5], #4 \n" "st1 {v9.s}[2], [%6], #4 \n" "st1 {v9.s}[3], [%7], #4 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(r0), // %8 "=r"(r1), // %9 "=r"(r2), // %10 "=r"(ktmp) // %11 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(r0), "9"(r1), "10"(r2), "11"(ktmp) : "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); #else // __aarch64__ asm volatile( "vld1.f32 {d20-d23}, [%11 :128]! \n" "pld [%8, #128] \n" "vld1.f32 {d0-d1}, [%8] \n" "vld1.f32 {d24-d27}, [%11 :128]! \n" "vld1.f32 {d16[0]}, [%0] \n" "vld1.f32 {d16[1]}, [%1] \n" "vld1.f32 {d17[0]}, [%2] \n" "vld1.f32 {d17[1]}, [%3] \n" "vmul.f32 q14, q10, d0[0] \n" "vmul.f32 q15, q11, d0[0] \n" "vld1.f32 {d18[0]}, [%4] \n" "vld1.f32 {d18[1]}, [%5] \n" "vld1.f32 {d19[0]}, [%6] \n" "vld1.f32 {d19[1]}, [%7] \n" "vld1.f32 {d20-d23}, [%11 :128]! \n" "vmla.f32 q8, q12, d0[1] \n" "vmla.f32 q9, q13, d0[1] \n" "vld1.f32 {d24-d27}, [%11 :128]! \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q11, d1[0] \n" "pld [%9, #128] \n" "vld1.f32 {d2-d3}, [%9] \n" "vld1.f32 {d20-d23}, [%11 :128]! \n" "vmla.f32 q8, q12, d2[0] \n" "vmla.f32 q9, q13, d2[0] \n" "vld1.f32 {d24-d27}, [%11 :128]! \n" "vmla.f32 q14, q10, d2[1] \n" "vmla.f32 q15, q11, d2[1] \n" "vld1.f32 {d20-d23}, [%11 :128]! \n" "vmla.f32 q8, q12, d3[0] \n" "vmla.f32 q9, q13, d3[0] \n" "pld [%10, #128] \n" "vld1.f32 {d0-d1}, [%10] \n" "vld1.f32 {d24-d27}, [%11 :128]! \n" "vmla.f32 q14, q10, d0[0] \n" "vmla.f32 q15, q11, d0[0] \n" "vld1.f32 {d20-d23}, [%11 :128]! 
\n" "vmla.f32 q8, q12, d0[1] \n" "vmla.f32 q9, q13, d0[1] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q11, d1[0] \n" "vadd.f32 q8, q8, q14 \n" "vadd.f32 q9, q9, q15 \n" "sub %11, %11, #288 \n" "vst1.f32 {d16[0]}, [%0]! \n" "vst1.f32 {d16[1]}, [%1]! \n" "vst1.f32 {d17[0]}, [%2]! \n" "vst1.f32 {d17[1]}, [%3]! \n" "vst1.f32 {d18[0]}, [%4]! \n" "vst1.f32 {d18[1]}, [%5]! \n" "vst1.f32 {d19[0]}, [%6]! \n" "vst1.f32 {d19[1]}, [%7]! \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(r0), // %8 "=r"(r1), // %9 "=r"(r2), // %10 "=r"(ktmp) // %11 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(r0), "9"(r1), "10"(r2), "11"(ktmp) : "memory", "q0", "q1", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ #else // __ARM_NEON float sum0 = 0.f; float sum1 = 0.f; float sum2 = 0.f; float sum3 = 0.f; float sum4 = 0.f; float sum5 = 0.f; float sum6 = 0.f; float sum7 = 0.f; sum0 += r0[0] * ktmp[0]; sum1 += r0[0] * ktmp[1]; sum2 += r0[0] * ktmp[2]; sum3 += r0[0] * ktmp[3]; sum4 += r0[0] * ktmp[4]; sum5 += r0[0] * ktmp[5]; sum6 += r0[0] * ktmp[6]; sum7 += r0[0] * ktmp[7]; ktmp += 8; sum0 += r0[1] * ktmp[0]; sum1 += r0[1] * ktmp[1]; sum2 += r0[1] * ktmp[2]; sum3 += r0[1] * ktmp[3]; sum4 += r0[1] * ktmp[4]; sum5 += r0[1] * ktmp[5]; sum6 += r0[1] * ktmp[6]; sum7 += r0[1] * ktmp[7]; ktmp += 8; sum0 += r0[2] * ktmp[0]; sum1 += r0[2] * ktmp[1]; sum2 += r0[2] * ktmp[2]; sum3 += r0[2] * ktmp[3]; sum4 += r0[2] * ktmp[4]; sum5 += r0[2] * ktmp[5]; sum6 += r0[2] * ktmp[6]; sum7 += r0[2] * ktmp[7]; ktmp += 8; sum0 += r1[0] * ktmp[0]; sum1 += r1[0] * ktmp[1]; sum2 += r1[0] * ktmp[2]; sum3 += r1[0] * ktmp[3]; sum4 += r1[0] * ktmp[4]; sum5 += r1[0] * ktmp[5]; sum6 += r1[0] * ktmp[6]; sum7 += r1[0] * ktmp[7]; ktmp += 8; sum0 += r1[1] * ktmp[0]; sum1 += r1[1] * ktmp[1]; sum2 += 
r1[1] * ktmp[2]; sum3 += r1[1] * ktmp[3]; sum4 += r1[1] * ktmp[4]; sum5 += r1[1] * ktmp[5]; sum6 += r1[1] * ktmp[6]; sum7 += r1[1] * ktmp[7]; ktmp += 8; sum0 += r1[2] * ktmp[0]; sum1 += r1[2] * ktmp[1]; sum2 += r1[2] * ktmp[2]; sum3 += r1[2] * ktmp[3]; sum4 += r1[2] * ktmp[4]; sum5 += r1[2] * ktmp[5]; sum6 += r1[2] * ktmp[6]; sum7 += r1[2] * ktmp[7]; ktmp += 8; sum0 += r2[0] * ktmp[0]; sum1 += r2[0] * ktmp[1]; sum2 += r2[0] * ktmp[2]; sum3 += r2[0] * ktmp[3]; sum4 += r2[0] * ktmp[4]; sum5 += r2[0] * ktmp[5]; sum6 += r2[0] * ktmp[6]; sum7 += r2[0] * ktmp[7]; ktmp += 8; sum0 += r2[1] * ktmp[0]; sum1 += r2[1] * ktmp[1]; sum2 += r2[1] * ktmp[2]; sum3 += r2[1] * ktmp[3]; sum4 += r2[1] * ktmp[4]; sum5 += r2[1] * ktmp[5]; sum6 += r2[1] * ktmp[6]; sum7 += r2[1] * ktmp[7]; ktmp += 8; sum0 += r2[2] * ktmp[0]; sum1 += r2[2] * ktmp[1]; sum2 += r2[2] * ktmp[2]; sum3 += r2[2] * ktmp[3]; sum4 += r2[2] * ktmp[4]; sum5 += r2[2] * ktmp[5]; sum6 += r2[2] * ktmp[6]; sum7 += r2[2] * ktmp[7]; ktmp += 8; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; *outptr4 += sum4; *outptr5 += sum5; *outptr6 += sum6; *outptr7 += sum7; ktmp -= 8 * 9; outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } ktmp += 8 * 9; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; out.fill(bias0); const float* ktmp = _kernel.channel(p / 8 + p % 8); for (int q = 0; q < inch; q++) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; const float* k0 = ktmp; const float* k1 = ktmp + 3; const float* k2 = ktmp + 6; #if __ARM_NEON float32x4_t _k0123 = vld1q_f32(k0); float32x4_t _k3456 = vld1q_f32(k1); float32x4_t _k6789 = vld1q_f32(k2); #endif // __ARM_NEON int i = 0; for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%2, #256] \n" "ld2 {v2.4s, v3.4s}, [%2], #32 \n" "0: \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.4s}, [%1] \n" "fmla v0.4s, v2.4s, %10.s[0] \n" "fmul v10.4s, v3.4s, %10.s[1] \n" "prfm pldl1keep, [%2, #256] \n" "ld2 {v8.4s, v9.4s}, [%2] \n" "ext v1.16b, v2.16b, v8.16b, #4 \n" "fmul v11.4s, v1.4s, %10.s[2] \n" "prfm pldl1keep, [%3, #256] \n" "ld2 {v2.4s, v3.4s}, [%3], #32 \n" "fmla v0.4s, v2.4s, %11.s[0] \n" "fmla v10.4s, v3.4s, %11.s[1] \n" "prfm pldl1keep, [%3, #256] \n" "ld2 {v8.4s, v9.4s}, [%3] \n" "ext v1.16b, v2.16b, v8.16b, #4 \n" "fmla v11.4s, v1.4s, %11.s[2] \n" "prfm pldl1keep, [%4, #256] \n" "ld2 {v2.4s, v3.4s}, [%4], #32 \n" "fmla v0.4s, v2.4s, %12.s[0] \n" "fmla v10.4s, v3.4s, %12.s[1] \n" "prfm pldl1keep, [%4, #256] \n" "ld2 {v8.4s, v9.4s}, [%4] \n" "ext v1.16b, v2.16b, v8.16b, #4 \n" "fmla v11.4s, v1.4s, %12.s[2] \n" "prfm pldl1keep, [%2, #256] \n" "ld2 {v2.4s, v3.4s}, [%2], #32 \n" "fadd v0.4s, v0.4s, v10.4s \n" "fadd v0.4s, v0.4s, v11.4s \n" "subs %w0, %w0, #1 \n" "st1 {v0.4s}, [%1], #16 \n" "bne 0b \n" "sub %2, %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k3456), // %11 "w"(_k6789) // %12 : "cc", "memory", "v0", "v1", "v2", "v3", 
"v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); } #else if (nn > 0) { asm volatile( "pld [%2, #256] \n" "vld2.f32 {d4-d7}, [%2]! \n" "0: \n" "pld [%1, #128] \n" "vld1.f32 {d0-d1}, [%1] \n" "vmla.f32 q0, q2, %e10[0] \n" "vmul.f32 q10, q3, %e10[1] \n" "pld [%2, #128] \n" "vld2.f32 {d16-d17}, [%2] \n" "vext.32 q1, q2, q8, #1 \n" "vmul.f32 q11, q1, %f10[0] \n" "pld [%3, #256] \n" "vld2.f32 {d4-d7}, [%3]! \n" "vmla.f32 q0, q2, %e11[0] \n" "vmla.f32 q10, q3, %e11[1] \n" "pld [%3, #128] \n" "vld2.f32 {d16-d17}, [%3] \n" "vext.32 q1, q2, q8, #1 \n" "vmla.f32 q11, q1, %f11[0] \n" "pld [%4, #256] \n" "vld2.f32 {d4-d7}, [%4]! \n" "vmla.f32 q0, q2, %e12[0] \n" "vmla.f32 q10, q3, %e12[1] \n" "pld [%4, #128] \n" "vld2.f32 {d16-d17}, [%4] \n" "vext.32 q1, q2, q8, #1 \n" "vmla.f32 q11, q1, %f12[0] \n" "pld [%2, #256] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vadd.f32 q0, q0, q10 \n" "vadd.f32 q0, q0, q11 \n" "subs %0, #1 \n" "vst1.f32 {d0-d1}, [%1]! \n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k3456), // %11 "w"(_k6789) // %12 : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _sum = vmulq_f32(_r00, _k0123); _sum = vmlaq_f32(_sum, _r10, _k3456); _sum = vmlaq_f32(_sum, _r20, _k6789); _sum = vsetq_lane_f32(*outptr, _sum, 3); #if __aarch64__ *outptr = vaddvq_f32(_sum); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); _ss = vpadd_f32(_ss, _ss); *outptr = vget_lane_f32(_ss, 0); #endif // __aarch64__ #else float sum = 0; sum += r0[0] * ktmp[0]; sum += r0[1] * ktmp[1]; sum += r0[2] * ktmp[2]; sum += r1[0] * ktmp[3]; sum += r1[1] * ktmp[4]; sum += r1[2] * ktmp[5]; sum += 
r2[0] * ktmp[6]; sum += r2[1] * ktmp[7]; sum += r2[2] * ktmp[8]; *outptr += sum; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } ktmp += 9; } } }
colorspace.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE % % C O O L O O R R SS P P A A C E % % C O O L O O RRRR SSS PPPP AAAAA C EEE % % C O O L O O R R SS P A A C E % % CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE % % % % % % MagickCore Image Colorspace Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/property.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/gem.h" #include "magick/gem-private.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/utility.h" /* Typedef declarations. 
*/

/* Packed x/y/z weights for one row of a 3x3 colorspace transform matrix. */
typedef struct _TransformPacket
{
  MagickRealType
    x,
    y,
    z;
} TransformPacket;

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   R G B T r a n s f o r m I m a g e                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RGBTransformImage() converts the reference image from sRGB to an alternate
%  colorspace.  The transformation matrices are not the standard ones:  the
%  weights are rescaled to normalized the range of the transformed values to
%  be [0..QuantumRange].
%
%  The format of the RGBTransformImage method is:
%
%      MagickBooleanType RGBTransformImage(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace to transform the image to.
%
*/

/* RGB -> CMY: each component is the normalized complement of the
   corresponding RGB channel. */
static inline void ConvertRGBToCMY(const Quantum red,const Quantum green,
  const Quantum blue,double *cyan,double *magenta,double *yellow)
{
  *cyan=QuantumScale*(QuantumRange-red);
  *magenta=QuantumScale*(QuantumRange-green);
  *yellow=QuantumScale*(QuantumRange-blue);
}

/* RGB -> CIE L*a*b*, via the intermediate XYZ space. */
static void ConvertRGBToLab(const Quantum red,const Quantum green,
  const Quantum blue,double *L,double *a,double *b)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLab(X,Y,Z,L,a,b);
}

/* XYZ -> LMS cone response (fixed 3x3 matrix). */
static inline void ConvertXYZToLMS(const double x,const double y,
  const double z,double *L,double *M,double *S)
{
  *L=0.7328*x+0.4296*y-0.1624*z;
  *M=(-0.7036*x+1.6975*y+0.0061*z);
  *S=0.0030*x+0.0136*y+0.9834*z;
}

/* RGB -> LMS, via XYZ. */
static void ConvertRGBToLMS(const Quantum red,const Quantum green,
  const Quantum blue,double *L,double *M,double *S)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLMS(X,Y,Z,L,M,S);
}

/* RGB -> CIE L*u*v*, via XYZ. */
static void ConvertRGBToLuv(const Quantum red,const Quantum green,
  const Quantum blue,double *L,double *u,double *v)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLuv(X,Y,Z,L,u,v);
}

/* RGB -> YPbPr (ITU-R BT.601 luma weights); Pb/Pr are biased by 0.5 so the
   result lies in [0..1]. */
static void ConvertRGBToYPbPr(const Quantum red,const Quantum green,
  const Quantum blue,double *Y,double *Pb,double *Pr)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *Pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5;
  *Pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5;
}

/* RGB -> YCbCr: identical weights to YPbPr in this implementation. */
static void ConvertRGBToYCbCr(const Quantum red,const Quantum green,
  const Quantum blue,double *Y,double *Cb,double *Cr)
{
  ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr);
}

/* RGB -> YUV; U/V are biased by 0.5 into [0..1]. */
static void ConvertRGBToYUV(const Quantum red,const Quantum green,
  const Quantum blue,double *Y,double *U,double *V)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *U=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5;
  *V=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5;
}

/* RGB -> YDbDr (SECAM); Db/Dr are biased by 0.5. */
static void ConvertRGBToYDbDr(const Quantum red,const Quantum green,
  const Quantum blue,double *Y,double *Db,double *Dr)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *Db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5;
  *Dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5;
}

/* RGB -> YIQ (NTSC); I/Q are biased by 0.5. */
static void ConvertRGBToYIQ(const Quantum red,const Quantum green,
  const Quantum blue,double *Y,double *I,double *Q)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *I=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5;
  *Q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5;
}

MagickExport MagickBooleanType RGBTransformImage(Image *image,
  const ColorspaceType colorspace)
{
#define RGBTransformImageTag  "RGBTransform/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    primary_info;

  register ssize_t
    i;

  ssize_t
    y;

  TransformPacket
    *x_map,
    *y_map,
    *z_map;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* source must be sRGB; target must be a concrete destination colorspace */
  assert(colorspace != sRGBColorspace);
  assert(colorspace != TransparentColorspace);
  assert(colorspace != UndefinedColorspace);
status=MagickTrue; progress=0; exception=(&image->exception); switch (colorspace) { case CMYKColorspace: { MagickPixelPacket zero; /* Convert RGB to CMYK colorspace. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); GetMagickPixelPacket(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); pixel.red=(MagickRealType) pixel.red; pixel.green=(MagickRealType) pixel.green; pixel.blue=(MagickRealType) pixel.blue; ConvertRGBToCMYK(&pixel); SetPixelPacket(image,&pixel,q,indexes+x); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->type=image->matte == MagickFalse ? ColorSeparationType : ColorSeparationMatteType; if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case GRAYColorspace: { /* Transform image from sRGB to GRAY. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelGray(q,ClampToQuantum(GetPixelIntensity(image,q))); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); image->type=GrayscaleType; return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from sRGB to HSI. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double X, Y, Z; Quantum blue, green, red; red=ClampToQuantum((MagickRealType) GetPixelRed(q)); green=ClampToQuantum((MagickRealType) GetPixelGreen(q)); blue=ClampToQuantum((MagickRealType) GetPixelBlue(q)); switch (colorspace) { case CMYColorspace: { ConvertRGBToCMY(red,green,blue,&X,&Y,&Z); break; } case HCLColorspace: { ConvertRGBToHCL(red,green,blue,&X,&Y,&Z); break; } case HCLpColorspace: { ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z); break; } case HSBColorspace: { ConvertRGBToHSB(red,green,blue,&X,&Y,&Z); break; } case HSIColorspace: { ConvertRGBToHSI(red,green,blue,&X,&Y,&Z); break; } case HSLColorspace: { ConvertRGBToHSL(red,green,blue,&X,&Y,&Z); break; } case HSVColorspace: { ConvertRGBToHSV(red,green,blue,&X,&Y,&Z); break; } case HWBColorspace: { ConvertRGBToHWB(red,green,blue,&X,&Y,&Z); break; } case LabColorspace: { ConvertRGBToLab(red,green,blue,&X,&Y,&Z); break; } case LCHColorspace: case LCHabColorspace: { ConvertRGBToLCHab(red,green,blue,&X,&Y,&Z); break; } case LCHuvColorspace: { ConvertRGBToLCHuv(red,green,blue,&X,&Y,&Z); break; } case LMSColorspace: { ConvertRGBToLMS(red,green,blue,&X,&Y,&Z); break; } case LuvColorspace: { ConvertRGBToLuv(red,green,blue,&X,&Y,&Z); break; } case XYZColorspace: { 
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); break; } case YCbCrColorspace: { ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z); break; } case YDbDrColorspace: { ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z); break; } case YIQColorspace: { ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z); break; } case YPbPrColorspace: { ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z); break; } case YUVColorspace: { ConvertRGBToYUV(red,green,blue,&X,&Y,&Z); break; } default: break; } SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*X)); SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*Y)); SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*Z)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { #define DisplayGamma (1.0/1.7) #define FilmGamma 0.6 #define ReferenceBlack 95.0 #define ReferenceWhite 685.0 const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform RGB to Log colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma"); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma"); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black"); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white"); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) logmap[i]=ScaleMapToQuantum((MagickRealType) (MaxMap*(reference_white+ log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002/ film_gamma))/1024.0)); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { Quantum blue, green, red; red=ClampToQuantum((MagickRealType) GetPixelRed(q)); green=ClampToQuantum((MagickRealType) GetPixelGreen(q)); blue=ClampToQuantum((MagickRealType) 
GetPixelBlue(q)); SetPixelRed(q,logmap[ScaleQuantumToMap(red)]); SetPixelGreen(q,logmap[ScaleQuantumToMap(green)]); SetPixelBlue(q,logmap[ScaleQuantumToMap(blue)]); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform image from sRGB to linear RGB. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { Quantum blue, green, red; red=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelBlue(q))); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. 
*/ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) ResetMagickMemory(&primary_info,0,sizeof(primary_info)); switch (colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B I and Q, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. */ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.33333*(double) i); y_map[i].x=(MagickRealType) (0.33334*(double) i); z_map[i].x=(MagickRealType) (0.33333*(double) i); x_map[i].y=(MagickRealType) (0.50000*(double) i); y_map[i].y=(MagickRealType) (0.00000*(double) i); z_map[i].y=(MagickRealType) (-0.50000*(double) i); x_map[i].z=(MagickRealType) (-0.25000*(double) i); y_map[i].z=(MagickRealType) (0.50000*(double) i); z_map[i].z=(MagickRealType) (-0.25000*(double) i); } break; } case Rec601LumaColorspace: { /* Initialize Rec601 luma tables: G = 0.298839*R+0.586811*G+0.114350*B */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.298839*(double) i); y_map[i].x=(MagickRealType) (0.586811*(double) i); z_map[i].x=(MagickRealType) (0.114350*(double) i); x_map[i].y=(MagickRealType) (0.298839*(double) i); y_map[i].y=(MagickRealType) (0.586811*(double) i); 
z_map[i].y=(MagickRealType) (0.114350*(double) i); x_map[i].z=(MagickRealType) (0.298839*(double) i); y_map[i].z=(MagickRealType) (0.586811*(double) i); z_map[i].z=(MagickRealType) (0.114350*(double) i); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.601): Y = 0.2988390*R+0.5868110*G+0.1143500*B Cb= -0.1687367*R-0.3312640*G+0.5000000*B Cr= 0.5000000*R-0.4186880*G-0.0813120*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. */ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.298839*(double) i); y_map[i].x=(MagickRealType) (0.586811*(double) i); z_map[i].x=(MagickRealType) (0.114350*(double) i); x_map[i].y=(MagickRealType) (-0.1687367*(double) i); y_map[i].y=(MagickRealType) (-0.331264*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].z=(MagickRealType) (-0.418688*(double) i); z_map[i].z=(MagickRealType) (-0.081312*(double) i); } break; } case Rec709LumaColorspace: { /* Initialize Rec709 luma tables: G = 0.212656*R+0.715158*G+0.072186*B */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.212656*(double) i); y_map[i].x=(MagickRealType) (0.715158*(double) i); z_map[i].x=(MagickRealType) (0.072186*(double) i); x_map[i].y=(MagickRealType) (0.212656*(double) i); y_map[i].y=(MagickRealType) (0.715158*(double) i); z_map[i].y=(MagickRealType) (0.072186*(double) i); x_map[i].z=(MagickRealType) (0.212656*(double) i); y_map[i].z=(MagickRealType) (0.715158*(double) i); z_map[i].z=(MagickRealType) (0.072186*(double) i); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R 
BT.709): Y = 0.212656*R+0.715158*G+0.072186*B Cb= -0.114572*R-0.385428*G+0.500000*B Cr= 0.500000*R-0.454153*G-0.045847*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. */ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.212656*(double) i); y_map[i].x=(MagickRealType) (0.715158*(double) i); z_map[i].x=(MagickRealType) (0.072186*(double) i); x_map[i].y=(MagickRealType) (-0.114572*(double) i); y_map[i].y=(MagickRealType) (-0.385428*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].z=(MagickRealType) (-0.454153*(double) i); z_map[i].z=(MagickRealType) (-0.045847*(double) i); } break; } case YCCColorspace: { /* Initialize YCC tables: Y = 0.298839*R+0.586811*G+0.114350*B C1= -0.298839*R-0.586811*G+0.88600*B C2= 0.70100*R-0.586811*G-0.114350*B YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. 
*/ primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156)); primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137)); for (i=0; i <= (ssize_t) (0.018*MaxMap); i++) { x_map[i].x=0.003962014134275617*i; y_map[i].x=0.007778268551236748*i; z_map[i].x=0.001510600706713781*i; x_map[i].y=(-0.002426619775463276)*i; y_map[i].y=(-0.004763965913702149)*i; z_map[i].y=0.007190585689165425*i; x_map[i].z=0.006927257754597858*i; y_map[i].z=(-0.005800713697502058)*i; z_map[i].z=(-0.0011265440570958)*i; } for ( ; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.2201118963486454*(1.099*i-0.099); y_map[i].x=0.4321260306242638*(1.099*i-0.099); z_map[i].x=0.08392226148409894*(1.099*i-0.099); x_map[i].y=(-0.1348122097479598)*(1.099*i-0.099); y_map[i].y=(-0.2646647729834528)*(1.099*i-0.099); z_map[i].y=0.3994769827314126*(1.099*i-0.099); x_map[i].z=0.3848476530332144*(1.099*i-0.099); y_map[i].z=(-0.3222618720834477)*(1.099*i-0.099); z_map[i].z=(-0.06258578094976668)*(1.099*i-0.099); } break; } default: { /* Linear conversion tables. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert from sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. 
*/ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register ssize_t x; register PixelPacket *restrict q; register size_t blue, green, red; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelRed(q))); green=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelGreen(q))); blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelBlue(q))); pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+ (MagickRealType) primary_info.x; pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+ (MagickRealType) primary_info.y; pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+ (MagickRealType) primary_info.z; SetPixelRed(q,ScaleMapToQuantum(pixel.red)); SetPixelGreen(q,ScaleMapToQuantum(pixel.green)); SetPixelBlue(q,ScaleMapToQuantum(pixel.blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RGBTransformImage) #endif proceed=SetImageProgress(image,RGBTransformImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { register size_t blue, green, red; /* Convert PseudoClass image. 
*/ for (i=0; i < (ssize_t) image->colors; i++) { MagickPixelPacket pixel; red=ScaleQuantumToMap(ClampToQuantum((MagickRealType) image->colormap[i].red)); green=ScaleQuantumToMap(ClampToQuantum((MagickRealType) image->colormap[i].green)); blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType) image->colormap[i].blue)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z; image->colormap[i].red=ScaleMapToQuantum(pixel.red); image->colormap[i].green=ScaleMapToQuantum(pixel.green); image->colormap[i].blue=ScaleMapToQuantum(pixel.blue); } (void) SyncImage(image); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColorspace() sets the colorspace member of the Image structure. % % The format of the SetImageColorspace method is: % % MagickBooleanType SetImageColorspace(Image *image, % const ColorspaceType colorspace) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace. 
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  /* Already tagged with this colorspace: nothing to do. */
  if (image->colorspace == colorspace)
    return(MagickTrue);
  image->colorspace=colorspace;
  /*
    Reset rendering intent and chromaticity, and assume the nonlinear
    default gamma (1/2.2) until the colorspace proves otherwise below.
  */
  image->rendering_intent=UndefinedIntent;
  image->gamma=1.000/2.200;
  (void) ResetMagickMemory(&image->chromaticity,0,sizeof(image->chromaticity));
  if (IsGrayColorspace(colorspace) != MagickFalse)
    {
      /*
        Rec601/Rec709 luminance intensities are linear-light, so gamma is 1;
        other gray intensity methods keep the nonlinear default gamma.
      */
      if ((image->intensity == Rec601LuminancePixelIntensityMethod) ||
          (image->intensity == Rec709LuminancePixelIntensityMethod))
        image->gamma=1.0;
      image->type=GrayscaleType;
    }
  else
    if (IsRGBColorspace(colorspace) != MagickFalse)
      image->gamma=1.0;  /* linear RGB */
  if (image->gamma == (1.000/2.200))
    {
      /*
        Gamma is still the nonlinear default, i.e. the colorspace is
        sRGB-like: tag the image with the sRGB primaries, the D65 white
        point and a perceptual rendering intent.
        NOTE(review): the exact floating-point equality is safe here only
        because gamma was assigned the identical constant expression
        (1.000/2.200) above — do not "fix" this into an epsilon compare.
      */
      image->rendering_intent=PerceptualIntent;
      image->gamma=1.000/2.200;
      image->chromaticity.red_primary.x=0.6400;
      image->chromaticity.red_primary.y=0.3300;
      image->chromaticity.red_primary.z=0.0300;
      image->chromaticity.green_primary.x=0.3000;
      image->chromaticity.green_primary.y=0.6000;
      image->chromaticity.green_primary.z=0.1000;
      image->chromaticity.blue_primary.x=0.1500;
      image->chromaticity.blue_primary.y=0.0600;
      image->chromaticity.blue_primary.z=0.7900;
      image->chromaticity.white_point.x=0.3127;
      image->chromaticity.white_point.y=0.3290;
      image->chromaticity.white_point.z=0.3583;
    }
  /* Resync the pixel cache so it reflects the new colorspace tag. */
  return(SyncImagePixelCache(image,&image->exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f o r m I m a g e C o l o r s p a c e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformImageColorspace() transforms an image colorspace.
%
%  The format of the TransformImageColorspace method is:
%
%      MagickBooleanType TransformImageColorspace(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* An undefined target simply retags the image. */
  if (colorspace == UndefinedColorspace)
    return(SetImageColorspace(image,colorspace));
  if (image->colorspace == colorspace)
    return(MagickTrue);  /* already in the requested colorspace: no op */
  /*
    The pixels are about to change representation, so any embedded color
    profiles no longer describe them.
  */
  (void) DeleteImageProfile(image,"icc");
  (void) DeleteImageProfile(image,"icm");
  /*
    First leg: bring the image back to sRGB.  If sRGB is also the target,
    that single transform is the whole job.
  */
  if (IssRGBColorspace(colorspace) != MagickFalse)
    return(TransformRGBImage(image,image->colorspace));
  if ((IssRGBColorspace(image->colorspace) == MagickFalse) &&
      (TransformRGBImage(image,image->colorspace) == MagickFalse))
    return(MagickFalse);
  /* Second leg: convert from sRGB to the requested colorspace. */
  return(RGBTransformImage(image,colorspace));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a n s f o r m R G B I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformRGBImage() converts the reference image from an alternate
%  colorspace to sRGB.  The transformation matrices are not the standard ones:
%  the weights are rescaled to normalize the range of the transformed values to
%  be [0..QuantumRange].
%
%  The format of the TransformRGBImage method is:
%
%      MagickBooleanType TransformRGBImage(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace to transform the image to.
%
*/

/* CMY (0..1, subtractive) to additive RGB: each channel is 1-component. */
static inline void ConvertCMYToRGB(const double cyan,const double magenta,
  const double yellow,Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(1.0-cyan));
  *green=ClampToQuantum(QuantumRange*(1.0-magenta));
  *blue=ClampToQuantum(QuantumRange*(1.0-yellow));
}

/*
  LMS cone response to CIE XYZ via a fixed 3x3 linear transform
  (inverse of the XYZ->LMS matrix used elsewhere in this module).
*/
static inline void ConvertLMSToXYZ(const double L,const double M,const double S,
  double *X,double *Y,double *Z)
{
  *X=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S;
  *Y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S;
  *Z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S;
}

/* LMS to RGB: compose the LMS->XYZ and XYZ->RGB transforms. */
static inline void ConvertLMSToRGB(const double L,const double M,
  const double S,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLMSToXYZ(L,M,S,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/*
  CIE Luv to RGB.  Inputs are the normalized (0..1) channel values; they
  are rescaled here to the conventional L* (0..100) and u*,v* ranges
  before the Luv->XYZ->RGB conversion.
*/
static inline void ConvertLuvToRGB(const double L,const double u,
  const double v,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/* Round to the nearest YCC table index, clamped to [0,1388]. */
static inline ssize_t RoundToYCC(const MagickRealType value)
{
  if (value <= 0.0)
    return(0);
  if (value >= 1388.0)
    return(1388);
  return((ssize_t) (value+0.5));
}

/*
  CMYK to RGB in place: pixel->index carries the K (black) channel; each
  color channel is un-composited against black and inverted.
*/
static inline void ConvertCMYKToRGB(MagickPixelPacket *pixel)
{
  pixel->red=((QuantumRange-(QuantumScale*pixel->red*
    (QuantumRange-pixel->index)+pixel->index)));
  pixel->green=((QuantumRange-(QuantumScale*pixel->green*
    (QuantumRange-pixel->index)+pixel->index)));
  pixel->blue=((QuantumRange-(QuantumScale*pixel->blue*
    (QuantumRange-pixel->index)+pixel->index)));
}

/*
  CIE Lab to RGB.  Normalized (0..1) inputs are rescaled to L* 0..100 and
  a*,b* centered on 0 (offset 0.5) before the Lab->XYZ->RGB conversion.
*/
static inline void ConvertLabToRGB(const double L,const double a,
  const double b,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/*
  YPbPr to RGB.  Pb/Pr are stored biased by +0.5; the coefficients are the
  rescaled inverse of the Rec.601 RGB->YPbPr matrix used on the forward path.
*/
static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr,
  Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(0.99999999999914679361*Y-
    1.2188941887145875e-06*(Pb-0.5)+1.4019995886561440468*(Pr-0.5)));
  *green=ClampToQuantum(QuantumRange*(0.99999975910502514331*Y-
    0.34413567816504303521*(Pb-0.5)-0.71413649331646789076*(Pr-0.5)));
  *blue=ClampToQuantum(QuantumRange*(1.00000124040004623180*Y+
    1.77200006607230409200*(Pb-0.5)+2.1453384174593273e-06*(Pr-0.5)));
}

/* YCbCr uses the identical inverse matrix as YPbPr in this normalization. */
static void ConvertYCbCrToRGB(const double Y,const double Cb,
  const double Cr,Quantum *red,Quantum *green,Quantum *blue)
{
  ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue);
}

/* YDbDr (SECAM) to RGB; Db/Dr are stored biased by +0.5. */
static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr,
  Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(Y+9.2303716147657e-05*(Db-0.5)-
    0.52591263066186533*(Dr-0.5)));
  *green=ClampToQuantum(QuantumRange*(Y-0.12913289889050927*(Db-0.5)+
    0.26789932820759876*(Dr-0.5)));
  *blue=ClampToQuantum(QuantumRange*(Y+0.66467905997895482*(Db-0.5)-
    7.9202543533108e-05*(Dr-0.5)));
}

/* YIQ (NTSC) to RGB; I/Q are stored biased by +0.5. */
static void ConvertYIQToRGB(const double Y,const double I,const double Q,
  Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(Y+0.9562957197589482261*(I-0.5)+
    0.6210244164652610754*(Q-0.5)));
  *green=ClampToQuantum(QuantumRange*(Y-0.2721220993185104464*(I-0.5)-
    0.6473805968256950427*(Q-0.5)));
  *blue=ClampToQuantum(QuantumRange*(Y-1.1069890167364901945*(I-0.5)+
    1.7046149983646481374*(Q-0.5)));
}

/* YUV to RGB; U/V are stored biased by +0.5. */
static void ConvertYUVToRGB(const double Y,const double U,const double V,
  Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(Y-3.945707070708279e-05*(U-0.5)+
    1.1398279671717170825*(V-0.5)));
  *green=ClampToQuantum(QuantumRange*(Y-0.3946101641414141437*(U-0.5)-
    0.5805003156565656797*(V-0.5)));
  *blue=ClampToQuantum(QuantumRange*(Y+2.0319996843434342537*(U-0.5)-
    4.813762626262513e-04*(V-0.5)));
}

MagickExport MagickBooleanType TransformRGBImage(Image *image,
  const ColorspaceType colorspace)
{
#define TransformRGBImageTag  "Transform/Image"

  /* Precomputed PhotoYCC decoding lookup table, indexed 0..1388. */
  static const float
    YCCMap[1389]
= { 0.000000, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f, 0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f, 0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f, 0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f, 0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f, 0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f, 0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f, 0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f, 0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f, 0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f, 0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f, 0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f, 0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f, 0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f, 0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f, 0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f, 0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f, 0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f, 0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f, 0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f, 0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f, 0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f, 0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f, 0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f, 0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f, 0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f, 0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f, 0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f, 0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f, 0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f, 0.129683f, 
0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f, 0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f, 0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f, 0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f, 0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f, 0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f, 0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f, 0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f, 0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f, 0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f, 0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f, 0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f, 0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f, 0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f, 0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f, 0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f, 0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f, 0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f, 0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f, 0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f, 0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f, 0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f, 0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f, 0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f, 0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f, 0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f, 0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f, 0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f, 0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f, 0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f, 0.259366f, 0.260086f, 
0.260807f, 0.261527f, 0.262248f, 0.262968f, 0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f, 0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f, 0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f, 0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f, 0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f, 0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f, 0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f, 0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f, 0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f, 0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f, 0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f, 0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f, 0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f, 0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f, 0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f, 0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f, 0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f, 0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f, 0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f, 0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f, 0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f, 0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f, 0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f, 0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f, 0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f, 0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f, 0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f, 0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f, 0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f, 0.389049f, 0.389769f, 0.390490f, 
0.391210f, 0.391931f, 0.392651f, 0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f, 0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f, 0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f, 0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f, 0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f, 0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f, 0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f, 0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f, 0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f, 0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f, 0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f, 0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f, 0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f, 0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f, 0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f, 0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f, 0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f, 0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f, 0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f, 0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f, 0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f, 0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f, 0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f, 0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f, 0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f, 0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f, 0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f, 0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f, 0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f, 0.518732f, 0.519452f, 0.520173f, 0.520893f, 
0.521614f, 0.522334f, 0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f, 0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f, 0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f, 0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f, 0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f, 0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f, 0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f, 0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f, 0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f, 0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f, 0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f, 0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f, 0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f, 0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f, 0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f, 0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f, 0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f, 0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f, 0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f, 0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f, 0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f, 0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f, 0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f, 0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f, 0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f, 0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f, 0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f, 0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f, 0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f, 0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 
0.652017f, 0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f, 0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f, 0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f, 0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f, 0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f, 0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f, 0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f, 0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f, 0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f, 0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f, 0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f, 0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f, 0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f, 0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f, 0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f, 0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f, 0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f, 0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f, 0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f, 0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f, 0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f, 0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f, 0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f, 0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f, 0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f, 0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f, 0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f, 0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f, 0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f, 0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f, 
0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f, 0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f, 0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f, 0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f, 0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f, 0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f, 0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f, 0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f, 0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f, 0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f, 0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f, 0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f, 0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f, 0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f, 0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f, 0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f, 0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f, 0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f, 0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f, 0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f, 0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f, 0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f, 0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f, 0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f, 0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f, 0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f, 0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f, 0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f, 0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f, 0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f, 0.912104f, 
0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f, 0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f, 0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f, 0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f, 0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f, 0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f, 0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f, 0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f, 0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f, 0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f, 0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f, 0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f, 0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f, 0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f, 0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f, 0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f, 0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f, 0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f, 0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f, 0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f, 0.998559f, 0.999280f, 1.000000 }; CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; TransformPacket *y_map, *x_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; progress=0; exception=(&image->exception); switch (colorspace) { case CMYKColorspace: { MagickPixelPacket zero; /* Transform image from CMYK to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } GetMagickPixelPacket(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); ConvertCMYKToRGB(&pixel); SetPixelPacket(image,&pixel,q,indexes+x); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case GRAYColorspace: case Rec601LumaColorspace: case Rec709LumaColorspace: { /* Transform linear RGB to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { MagickRealType gray; gray=(MagickRealType) GetPixelGray(q); SetPixelRed(q,ClampToQuantum(gray)); SetPixelGreen(q,ClampToQuantum(gray)); SetPixelBlue(q,ClampToQuantum(gray)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from source colorspace to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double X, Y, Z; Quantum blue, green, red; X=QuantumScale*GetPixelRed(q); Y=QuantumScale*GetPixelGreen(q); Z=QuantumScale*GetPixelBlue(q); switch (colorspace) { case CMYColorspace: { ConvertCMYToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLColorspace: { ConvertHCLToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLpColorspace: { ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue); break; } case HSBColorspace: { ConvertHSBToRGB(X,Y,Z,&red,&green,&blue); break; } case HSIColorspace: { ConvertHSIToRGB(X,Y,Z,&red,&green,&blue); break; } case HSLColorspace: { ConvertHSLToRGB(X,Y,Z,&red,&green,&blue); break; } case HSVColorspace: { ConvertHSVToRGB(X,Y,Z,&red,&green,&blue); break; } case HWBColorspace: { ConvertHWBToRGB(X,Y,Z,&red,&green,&blue); break; } case LabColorspace: { ConvertLabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ConvertLCHabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHuvColorspace: { ConvertLCHuvToRGB(X,Y,Z,&red,&green,&blue); break; } case LMSColorspace: { ConvertLMSToRGB(X,Y,Z,&red,&green,&blue); break; } case LuvColorspace: { ConvertLuvToRGB(X,Y,Z,&red,&green,&blue); break; } case XYZColorspace: { ConvertXYZToRGB(X,Y,Z,&red,&green,&blue); break; } case YCbCrColorspace: { 
ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue); break; } case YDbDrColorspace: { ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue); break; } case YIQColorspace: { ConvertYIQToRGB(X,Y,Z,&red,&green,&blue); break; } case YPbPrColorspace: { ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue); break; } case YUVColorspace: { ConvertYUVToRGB(X,Y,Z,&red,&green,&blue); break; } default: break; } SetPixelRed(q,ClampToQuantum((MagickRealType) red)); SetPixelGreen(q,ClampToQuantum((MagickRealType) green)); SetPixelBlue(q,ClampToQuantum((MagickRealType) blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform Log to sRGB colorspace. */ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma"); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma"); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black"); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white"); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++) logmap[i]=(Quantum) 0; 
for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++) logmap[i]=ClampToQuantum((MagickRealType) QuantumRange/(1.0-black)* (pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002/ film_gamma)-black)); for ( ; i <= (ssize_t) MaxMap; i++) logmap[i]=QuantumRange; if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { Quantum blue, green, red; red=ClampToQuantum((MagickRealType) logmap[ScaleQuantumToMap(GetPixelRed(q))]); green=ClampToQuantum((MagickRealType) logmap[ScaleQuantumToMap(GetPixelGreen(q))]); blue=ClampToQuantum((MagickRealType) logmap[ScaleQuantumToMap(GetPixelBlue(q))]); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform linear RGB to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { Quantum blue, green, red; red=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelBlue(q))); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. 
*/ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } switch (colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: R = I1+1.00000*I2-0.66668*I3 G = I1+0.00000*I2+1.33333*I3 B = I1-1.00000*I2-0.66668*I3 I and Q, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(1.0*(double) i); y_map[i].x=(0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].x=(-0.5*0.66668*(2.0*(double) i-MaxMap)); x_map[i].y=(1.0*(double) i); y_map[i].y=(0.5*0.00000*(2.0*(double) i-MaxMap)); z_map[i].y=(0.5*1.33333*(2.0*(double) i-MaxMap)); x_map[i].z=(1.0*(double) i); y_map[i].z=(-0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].z=(-0.5*0.66668*(2.0*(double) i-MaxMap)); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.402000*Cr G = Y-0.344136*Cb-0.714136*Cr B = Y+1.772000*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.99999999999914679361*(double) i; y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap); z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap); x_map[i].y=0.99999975910502514331*(double) i; y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap); z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap); x_map[i].z=1.00000124040004623180*(double) i; y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap); z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.574800*Cr G = Y-0.187324*Cb-0.468124*Cr B = Y+1.855600*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*(double) i-MaxMap)); z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*(double) i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*(double) i); y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*(double) i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*(double) i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*(double) i); y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*(double) i-MaxMap)); z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*(double) i-MaxMap)); } break; } case YCCColorspace: { /* Initialize YCC tables: R = Y +1.340762*C2 G = Y-0.317038*C1-0.682243*C2 B = Y+1.632639*C1 YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.3584000*(double) i); y_map[i].x=(MagickRealType) (0.0000000); z_map[i].x=(MagickRealType) (1.8215000*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].y=(MagickRealType) (1.3584000*(double) i); y_map[i].y=(MagickRealType) ((-0.4302726)*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].y=(MagickRealType) ((-0.9271435)*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].z=(MagickRealType) (1.3584000*(double) i); y_map[i].z=(MagickRealType) (2.2179000*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].z=(MagickRealType) (0.0000000); } break; } default: { /* Linear conversion tables. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert to sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. 
*/ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register size_t blue, green, red; red=ScaleQuantumToMap(GetPixelRed(q)); green=ScaleQuantumToMap(GetPixelGreen(q)); blue=ScaleQuantumToMap(GetPixelBlue(q)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } SetPixelRed(q,ClampToQuantum(pixel.red)); SetPixelGreen(q,ClampToQuantum(pixel.green)); SetPixelBlue(q,ClampToQuantum(pixel.blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransformRGBImage) #endif proceed=SetImageProgress(image,TransformRGBImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { /* Convert 
PseudoClass image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { MagickPixelPacket pixel; register size_t blue, green, red; red=ScaleQuantumToMap(image->colormap[i].red); green=ScaleQuantumToMap(image->colormap[i].green); blue=ScaleQuantumToMap(image->colormap[i].blue); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } image->colormap[i].red=ClampToQuantum(pixel.red); image->colormap[i].green=ClampToQuantum(pixel.green); image->colormap[i].blue=ClampToQuantum(pixel.blue); } (void) SyncImage(image); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(MagickTrue); }
copyin-2.c
/* { dg-do run } */ /* { dg-options "-O2" } */ /* { dg-require-effective-target tls_runtime } */ #include <omp.h> #include <stdlib.h> struct { int t; char buf[64]; } thr = { 32, "" }; #pragma omp threadprivate (thr) int main (void) { int l = 0; omp_set_dynamic (0); omp_set_num_threads (6); #pragma omp parallel copyin (thr) reduction (||:l) { l = thr.t != 32; thr.t = omp_get_thread_num () + 11; } if (l || thr.t != 11) abort (); #pragma omp parallel reduction (||:l) l = thr.t != omp_get_thread_num () + 11; if (l) abort (); return 0; }