source
stringlengths
3
92
c
stringlengths
26
2.25M
SpatialMatching.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialMatching.c"
#else

/* NOTE(review): unguarded function-like macros -- each argument may be
   evaluated twice, so never pass expressions with side effects.  */
#define square(x) ((x)*(x))
#define max(x,y) (((x)>(y)) ? (x) : (y))
#define min(x,y) (((x)>(y)) ? (y) : (x))

/* Lua binding (stack: 1=self, 2=input1, 3=input2).
   For every position (y1,x1) of input1 and every displaced position
   (y2,x2) of input2 inside a maxh x maxw search window, writes the
   squared L2 distance over channels into self.output.  The indexing
   below (os[0],os[1] for y1,x1 and os[2],os[3] for dy,dx) shows output
   is a 4-D tensor ordered (y1, x1, dy, dx).
   Unmatched window cells keep the fill value 1e30.  */
static int nn_(SpatialMatching_updateOutput)(lua_State *L)
{
  // get all params
  THTensor *input1 = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *input2 = luaT_checkudata(L, 3, torch_Tensor);
  int maxw = luaT_getfieldcheckint(L, 1, "maxw");
  int maxh = luaT_getfieldcheckint(L, 1, "maxh");
  int full_output = luaT_getfieldcheckboolean(L, 1, "full_output");
  THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);

  // dims (input1 is laid out channels x height x width)
  int iwidth = input1->size[2];
  int iheight = input1->size[1];
  int ichannels = input1->size[0];

  // make contiguous
  //input1 = THTensor_(newContiguous)(input1);
  //input2 = THTensor_(newContiguous)(input2);
  //output = THTensor_(newContiguous)(output);

  // zero output
  // (actually fills with a large sentinel so unvisited window cells
  // never win a subsequent min-reduction)
  THTensor_(fill)(output, 1e30);

  // get strides
  long *i1s = input1->stride;
  long *i2s = input2->stride;
  long *os = output->stride;

  // get pointers
  real *input1_p = THTensor_(data)(input1);
  real *input2_p = THTensor_(data)(input2);
  real *output_p = THTensor_(data)(output);

  // compute output
  int x1,y1,x2,y2,k;
  real dist;
  if (full_output) {
    // get halves of window size (window is centered on (y1,x1);
    // halfh1/halfw1 reach backwards, halfh2/halfw2 forwards)
    int halfh1 = ceil((real)maxh/2)-1;
    int halfh2 = floor((real)maxh/2)+1;
    int halfw1 = ceil((real)maxw/2)-1;
    int halfw2 = floor((real)maxw/2)+1;
    long dy, dx;
    // y1 is the parallel loop index; all writes go to distinct output
    // cells, so no synchronization is needed here.
#pragma omp parallel for private(x1,x2,y2,k,dist,dy,dx)
    for (y1 = 0; y1 < iheight; y1++) {
      for (x1 = 0; x1 < iwidth; x1++) {
        // window is clamped to the image, so (y2,x2) stays in bounds
        for (y2 = max(0,y1-halfh1); y2 < min(iheight,y1+halfh2); y2++) {
          for (x2 = max(0,(x1-halfw1)); x2 < min(iwidth,x1+halfw2); x2++) {
            dist = 0;
            for (k = 0; k < ichannels; k++) {
              dist += square(input1_p[k*i1s[0] + y1*i1s[1] + x1*i1s[2]]
                             - input2_p[k*i2s[0] + y2*i2s[1] + x2*i2s[2]]);
            }
            dy = y2-y1 + halfh1;
            dx = x2-x1 + halfw1;
            output_p[dy*os[2] + dx*os[3] + y1*os[0] + x1*os[1]] = dist;
          }
        }
      }
    }
    /* real *input1_p_it_start = input1_p, *input1_p_it_end = input1_p+ichannels*i1s[0];
       real *input1_p_it, *input2_p_it;
       for (y1 = 0; y1 < iheight; y1++) {
         for (x1 = 0; x1 < iwidth; x1++, ++input1_p_it_start, ++input1_p_it_end) {
           for (y2 = max(0,y1-halfh1); y2 < min(iheight,y1+halfh2); y2++) {
             for (x2 = max(0,(x1-halfw1)); x2 < min(iwidth,x1+halfw2); x2++) {
               dist = 0;
               for (input1_p_it = input1_p_it_start,
                    input2_p_it=input2_p+y2*i2s[1]+x2*i2s[2];
                    input1_p_it != input1_p_it_end;
                    input1_p_it += i1s[0], input2_p_it += i2s[0]) {
                 dist += square(*input1_p_it - *input2_p_it);
               }
               dy = y2-y1 + halfh1;
               dx = x2-x1 + halfw1;
               output_p[dy*os[0] + dx*os[1] + y1*os[2] + x1*os[3]] = dist;
             }
           }
         }
       } */
  } else {
    // "compact" output: window starts at (y1,x1) instead of being centered.
    // NOTE(review): y2 runs to y1+maxh-1 and x2 to x1+maxw-1 with no clamp
    // against iheight/iwidth -- assumes input2 is padded at least maxh-1 /
    // maxw-1 past input1's extent.  TODO confirm against the Lua caller.
#pragma omp parallel for private(y1,x1,x2,y2,k,dist)
    for (y1 = 0; y1 < iheight; y1++) {
      for (x1 = 0; x1 < iwidth; x1++) {
        for (y2 = y1; y2 < y1+maxh; y2++) {
          for (x2 = x1; x2 < x1+maxw; x2++) {
            dist = 0;
            for (k = 0; k < ichannels; k++) {
              dist += square(input1_p[k*i1s[0] + y1*i1s[1] + x1*i1s[2]]
                             - input2_p[k*i2s[0] + y2*i2s[1] + x2*i2s[2]]);
            }
            output_p[(y2-y1)*os[2] + (x2-x1)*os[3] + y1*os[0] + x1*os[1]] = dist;
          }
        }
      }
    }
  }

  // done
  return 1;
}

/* Backward pass: for each window match, the derivative of the squared
   distance 2*(a-b) is scaled by gradOutput and accumulated (+= into
   gradInput1, -= into gradInput2).  Both gradInput tensors are assumed
   to be zeroed by the caller -- TODO confirm in the Lua wrapper.  */
static int nn_(SpatialMatching_updateGradInput)(lua_State *L)
{
  // get all params
  THTensor *input1 = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *input2 = luaT_checkudata(L, 3, torch_Tensor);
  THTensor *gradInput1 = luaT_getfieldcheckudata(L, 1, "gradInput1", torch_Tensor);
  THTensor *gradInput2 = luaT_getfieldcheckudata(L, 1, "gradInput2", torch_Tensor);
  THTensor *gradOutput = luaT_checkudata(L, 4, torch_Tensor);
  int full_output = luaT_getfieldcheckboolean(L, 1, "full_output");
  int maxw = luaT_getfieldcheckint(L, 1, "maxw");
  int maxh = luaT_getfieldcheckint(L, 1, "maxh");

  // dims
  int iwidth = input1->size[2];
  int iheight = input1->size[1];
  int ichannels = input1->size[0];

  // get strides
  long *i1s = input1->stride;
  long *i2s = input2->stride;
  long *gi1s = gradInput1->stride;
  long *gi2s = gradInput2->stride;
  long *gos = gradOutput->stride;

  // get pointers
  real *input1_p = THTensor_(data)(input1);
  real *input2_p = THTensor_(data)(input2);
  real *gradInput1_p = THTensor_(data)(gradInput1);
  real *gradInput2_p = THTensor_(data)(gradInput2);
  real *gradOutput_p = THTensor_(data)(gradOutput);

  // compute gradients
  int x1, y1, x2, y2, k;
  real partial_d;
  if (full_output) {
    // get halves of window size
    int halfh1 = ceil((real)maxh/2)-1;
    int halfh2 = floor((real)maxh/2)+1;
    int halfw1 = ceil((real)maxw/2)-1;
    int halfw2 = floor((real)maxw/2)+1;
    long dy, dx;
    // Deliberately serial: overlapping windows make the += / -=
    // accumulations race if parallelized over y1.
    //#pragma omp parallel for private(x1,x2,y2,k,dy,dx,partial_d) NO! gradInput has +=
    for (y1 = 0; y1 < iheight; y1++) {
      for (x1 = 0; x1 < iwidth; x1++) {
        for (y2 = max(0,y1-halfh1); y2 < min(iheight,y1+halfh2); y2++) {
          for (x2 = max(0,(x1-halfw1)); x2 < min(iwidth,x1+halfw2); x2++) {
            dy = y2-y1 + halfh1;
            dx = x2-x1 + halfw1;
            for (k=0; k<ichannels; k++) {
              partial_d = 2*(input1_p[k*i1s[0] + y1*i1s[1] + x1*i1s[2]]
                             - input2_p[k*i2s[0] + y2*i2s[1] + x2*i2s[2]]);
              partial_d *= gradOutput_p[dy*gos[2] + dx*gos[3] + y1*gos[0] + x1*gos[1]];
              gradInput1_p[k*gi1s[0] + y1*gi1s[1] + x1*gi1s[2]] += partial_d;
              gradInput2_p[k*gi2s[0] + y2*gi2s[1] + x2*gi2s[2]] -= partial_d;
            }
          }
        }
      }
    }
  } else {
    // serial for the same accumulation-race reason as above
    //#pragma omp parallel for private(x1,x2,y2,k,partial_d)
    for (y1 = 0; y1 < iheight; y1++) {
      for (x1 = 0; x1 < iwidth; x1++) {
        for (y2 = y1; y2 < y1+maxh; y2++) {
          for (x2 = x1; x2 < x1+maxw; x2++) {
            for (k = 0; k < ichannels; k++) {
              partial_d = 2*(input1_p[k*i1s[0] + y1*i1s[1] + x1*i1s[2]]
                             - input2_p[k*i2s[0] + y2*i2s[1] + x2*i2s[2]]);
              partial_d *= gradOutput_p[(y2-y1)*gos[2]+(x2-x1)*gos[3]+y1*gos[0]+x1*gos[1]];
              gradInput1_p[k*gi1s[0] + y1*gi1s[1] + x1*gi1s[2]] += partial_d;
              gradInput2_p[k*gi2s[0] + y2*gi2s[1] + x2*gi2s[2]] -= partial_d;
            }
          }
        }
      }
    }
  }

  // done
  return 1;
}

/* Method table exposed to Lua under the "nn" namespace.  */
static const struct luaL_Reg nn_(SpatialMatching__) [] = {
  {"SpatialMatching_updateOutput", nn_(SpatialMatching_updateOutput)},
  {"SpatialMatching_updateGradInput", nn_(SpatialMatching_updateGradInput)},
  {NULL, NULL}
};

/* Register the methods on the torch.Tensor metatable.  */
static void nn_(SpatialMatching_init)(lua_State *L)
{
  luaT_pushmetatable(L, torch_Tensor);
  luaT_registeratname(L, nn_(SpatialMatching__), "nn");
  lua_pop(L,1);
}

#endif
GB_unaryop__minv_int16_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_int16_uint8
// op(A') function:  GB_tran__minv_int16_uint8

// C type:   int16_t
// A type:   uint8_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = GB_IMINV_SIGNED (aij, 16)

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 16) ;

// casting
// NOTE: GB_CASTING *declares* a local named z; GB_CAST_OP below relies on
// the declaration of `x` it introduces.
#define GB_CASTING(z, x) \
    int16_t z = (int16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT16 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__minv_int16_uint8
(
    int16_t *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // embarrassingly parallel: each p writes only Cx [p]
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_int16_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body is generated by the shared transpose template, driven by the
    // GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
jacobi_openmp.c
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <omp.h>

#define MAX_ITER 10000
// Maximum value of the matrix element
#define MAX 10000
#define TOL 0.000001

// Generate a random float number in [0, max]
float rand_float(const int max) {
  return ((float)rand() / (float)(RAND_MAX)) * max;
}

// Calculates how many rows are given, as maximum, to each thread.
// BUG FIX: the old code computed ceil((n-2) / num_threads) with *integer*
// division, so ceil() was a no-op and the count was rounded down.
int get_max_rows(const int num_threads, const int n) {
  if (num_threads <= 0) return n;  // defensive: avoid division by zero
  return (int)(ceil((double)(n - 2) / num_threads) + 2);
}

// Allocate an n x m matrix (row-major, flat) filled with random floats.
void alloc_matrix(float **mat, const int n, const int m) {
  *mat = (float *) malloc((size_t)n * (size_t)m * sizeof(float));
  if (*mat == NULL) {  // BUG FIX: malloc result was never checked
    fprintf(stderr, "alloc_matrix: out of memory\n");
    exit(1);
  }
  const int size = n * m;
  for (int i = 0; i < size; i++) {
    (*mat)[i] = rand_float(MAX);
  }
}

// Append one timing row to the CSV results file; the header row is written
// only when the file is first created.
void write_to_file(int n, int t, int i, char *schedule_type, float total_time, float exec_time) {
  char *file_name = "jacobi_omp_results.csv";
  const int need_header = (access(file_name, F_OK) == -1);
  FILE *f = fopen(file_name, "a");
  if (f == NULL) {  // BUG FIX: fopen result was never checked
    fprintf(stderr, "write_to_file: cannot open %s\n", file_name);
    return;
  }
  if (need_header) {
    fprintf(f, "Matrix size;No. of maximum threads;No. of iterations;Schedule type;Total time;Operations time;\n");
  }
  fprintf(f, "%d;%d;%d;%s;%f;%f;\n", n, t, i, schedule_type, total_time, exec_time);
  fclose(f);
}

// Iteratively relaxes the interior of the n x m matrix until the mean
// absolute update falls below TOL or MAX_ITER sweeps are done.
// Interface unchanged; returns the number of iterations performed.
int solver(float **mat, const int n, const int m, const int num_ths, const int max_cells_per_th) {
  int done = 0;
  int cnt_iter = 0;
  const int mat_dim = n * m;  // BUG FIX: was n*n, wrong whenever n != m

  // BUG FIX: the scratch matrix used to be a VLA re-created on every sweep
  // (stack-overflow risk for large n) and sized [n][n] instead of [n][m];
  // allocate it once on the heap instead.
  float *temp = malloc((size_t)n * (size_t)m * sizeof(float));
  if (temp == NULL) {
    fprintf(stderr, "solver: out of memory\n");
    exit(1);
  }

  while (!done && (cnt_iter < MAX_ITER)) {
    float diff = 0;

    // Neither the border rows nor the border columns are updated
    // (that's why both 'i' and 'j' start at 1 and stop at [nm]-1).
    // NOTE(review): the stencil keeps the original 5-term sum
    // 0.25*(center + 4 neighbours); classic Jacobi omits the center
    // term -- confirm against the intended formulation before changing.
#pragma omp parallel for num_threads(num_ths) schedule(static, max_cells_per_th) collapse(2) reduction(+:diff)
    for (int i = 1; i < n - 1; i++) {
      for (int j = 1; j < m - 1; j++) {
        const int pos = (i * m) + j;
        // BUG FIX: vertical neighbours are one *row* (m cells) away,
        // not n cells -- the old code only worked for square matrices.
        temp[pos] = 0.25f * ( (*mat)[pos]
                            + (*mat)[pos - 1] + (*mat)[pos + 1]
                            + (*mat)[pos - m] + (*mat)[pos + m] );
        // BUG FIX: abs() is the integer version and truncated the float
        // difference to int; fabsf() keeps the fractional part.
        diff += fabsf((*mat)[pos] - temp[pos]);
      }
    }

    if (diff / mat_dim < TOL) {
      done = 1;
    }
    cnt_iter++;

    // substitute new values for the next sweep
    for (int i = 1; i < n - 1; i++) {
      for (int j = 1; j < m - 1; j++) {
        const int pos = (i * m) + j;
        (*mat)[pos] = temp[pos];
      }
    }
  }

  free(temp);
  printf("Solver converged after %d iterations\n", cnt_iter);
  return cnt_iter;
}

int main(int argc, char *argv[]) {
  // BUG FIX: two arguments are consumed (argv[1] and argv[2]) but the old
  // check was argc < 2, so running with a single argument crashed.
  if (argc < 3) {
    printf("Call this program with two parameters: matrix_size num_of_threads\n");
    printf("\t matrix_size: Add 2 to a power of 2 (e.g.: 18, 1026)\n");
    printf("\t num_of_threads\n");
    exit(1);
  }
  const int n = atoi(argv[1]);
  omp_set_dynamic(0);      // Explicitly disable dynamic teams
  const int t = atoi(argv[2]);
  omp_set_num_threads(t);  // Use t threads for all consecutive parallel regions

  // Start recording the time
  const double i_total_t = omp_get_wtime();

  float *mat;
  alloc_matrix(&mat, n, n);

  // Calculate how many cells as maximum per thread
  const int max_threads = omp_get_max_threads();
  const int max_rows = get_max_rows(max_threads, n);
  const int max_cells = max_rows * (n - 2);

  // Initial operation time
  const double i_exec_t = omp_get_wtime();

  // Parallelized solver
  const int num_iter = solver(&mat, n, n, max_threads, max_cells);

  // Final operation time
  const double f_exec_t = omp_get_wtime();

  free(mat);

  // Finish recording the time
  const double f_total_t = omp_get_wtime();
  const double total_time = f_total_t - i_total_t;
  const double exec_time = f_exec_t - i_exec_t;

  printf("No. of maximum threads: %d\n", max_threads);
  printf("Total time: %lf\n", total_time);
  printf("Operations time: %lf\n", exec_time);
  write_to_file(n, max_threads, num_iter, "static", total_time, exec_time);
}
parallel-numthreads.c
// Test if/num_threads clause handling #include <assert.h> #include <stdio.h> int main(void) { int i=0; #pragma omp parallel if(i==0) num_threads(3) { #pragma omp single { assert (omp_get_num_threads() == 3 ); } printf("Mutual exclusive output 1.\n"); } #pragma omp parallel if(i!=0) num_threads(3) { #pragma omp single { assert (omp_get_num_threads() == 1 ); } printf("Mutual exclusive output 2.\n"); } return 0; }
GB_unaryop__identity_fp32_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_fp32_fp64
// op(A') function:  GB_tran__identity_fp32_fp64

// C type:   float
// A type:   double
// cast:     float cij = (float) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    double

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: the only work is the double->float cast)
#define GB_OP(z, x) \
    z = x ;

// casting
// NOTE: GB_CASTING *declares* a local named z; GB_CAST_OP below relies on
// the declaration of `x` it introduces.
#define GB_CASTING(z, x) \
    float z = (float) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_fp32_fp64
(
    float *restrict Cx,
    const double *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // embarrassingly parallel: each p writes only Cx [p]
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__identity_fp32_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body is generated by the shared transpose template, driven by the
    // GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
q_rhashmap_mk_loc.c
#include "q_rhashmap_common.h" #include "q_rhashmap_mk_loc.h" int q_rhashmap_mk_loc( uint32_t *hashes, // input [nkeys] uint32_t nkeys, // input uint32_t hmap_size, // input uint32_t *locs // output [nkeys] ) { int status = 0; int chunk_size = 1024; uint64_t divinfo = fast_div32_init(hmap_size); #pragma omp parallel for schedule(static, chunk_size) for ( uint32_t i = 0; i < nkeys; i++ ) { locs[i] = fast_rem32(hashes[i], hmap_size, divinfo); } return status; }
decl2.c
/* Process declarations and variables for C++ compiler.
   Copyright (C) 1988-2018 Free Software Foundation, Inc.
   Hacked by Michael Tiemann (tiemann@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Process declarations and symbol lookup for C++ front end.
   Also constructs types; the standard scalar types at initialization,
   and structure, union, array and enum types when they are declared.  */

/* ??? not all decl nodes are given the most useful possible
   line numbers.  For example, the CONST_DECLs for enum values.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "memmodel.h"
#include "target.h"
#include "cp-tree.h"
#include "c-family/c-common.h"
#include "timevar.h"
#include "stringpool.h"
#include "cgraph.h"
#include "varasm.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "decl.h"
#include "toplev.h"
#include "c-family/c-objc.h"
#include "c-family/c-pragma.h"
#include "dumpfile.h"
#include "intl.h"
#include "c-family/c-ada-spec.h"
#include "asan.h"

/* Id for dumping the raw trees.  */
int raw_dump_id;

extern cpp_reader *parse_in;

/* This structure contains information about the initializations
   and/or destructions required for a particular priority level.  */
typedef struct priority_info_s {
  /* Nonzero if there have been any initializations at this priority
     throughout the translation unit.  */
  int initializations_p;
  /* Nonzero if there have been any destructions at this priority
     throughout the translation unit.  */
  int destructions_p;
} *priority_info;

static void mark_vtable_entries (tree);
static bool maybe_emit_vtables (tree);
static tree start_objects (int, int);
static void finish_objects (int, int, tree);
static tree start_static_storage_duration_function (unsigned);
static void finish_static_storage_duration_function (tree);
static priority_info get_priority_info (int);
static void do_static_initialization_or_destruction (tree, bool);
static void one_static_initialization_or_destruction (tree, tree, bool);
static void generate_ctor_or_dtor_function (bool, int, location_t *);
static int generate_ctor_and_dtor_functions_for_priority (splay_tree_node,
							  void *);
static tree prune_vars_needing_no_initialization (tree *);
static void write_out_vars (tree);
static void import_export_class (tree);
static tree get_guard_bits (tree);
static void determine_visibility_from_class (tree, tree);
static bool determine_hidden_inline (tree);
static void maybe_instantiate_decl (tree);

/* A list of static class variables.  This is needed, because a
   static class variable can be declared inside the class without
   an initializer, and then initialized, statically, outside the class.  */
static GTY(()) vec<tree, va_gc> *pending_statics;

/* A list of functions which were declared inline, but which we
   may need to emit outline anyway.  */
static GTY(()) vec<tree, va_gc> *deferred_fns;

/* A list of decls that use types with no linkage, which we need to make
   sure are defined.  */
static GTY(()) vec<tree, va_gc> *no_linkage_decls;

/* A vector of alternating decls and identifiers, where the latter
   is to be an alias for the former if the former is defined.  */
static GTY(()) vec<tree, va_gc> *mangling_aliases;

/* hash traits for declarations.  Hashes single decls via
   DECL_ASSEMBLER_NAME_RAW.  */

struct mangled_decl_hash : ggc_remove <tree>
{
  typedef tree value_type; /* A DECL.  */
  typedef tree compare_type; /* An identifier.  */

  static hashval_t hash (const value_type decl)
  {
    return IDENTIFIER_HASH_VALUE (DECL_ASSEMBLER_NAME_RAW (decl));
  }
  static bool equal (const value_type existing, compare_type candidate)
  {
    tree name = DECL_ASSEMBLER_NAME_RAW (existing);
    return candidate == name;
  }

  static inline void mark_empty (value_type &p) {p = NULL_TREE;}
  static inline bool is_empty (value_type p) {return !p;}

  /* Deleted slots are marked with the otherwise-invalid pointer value 1.  */
  static bool is_deleted (value_type e)
  {
    return e == reinterpret_cast <value_type> (1);
  }
  static void mark_deleted (value_type &e)
  {
    e = reinterpret_cast <value_type> (1);
  }
};

/* A hash table of decls keyed by mangled name.  Used to figure out if
   we need compatibility aliases.  */
static GTY(()) hash_table<mangled_decl_hash> *mangled_decls;

/* Nonzero if we're done parsing and into end-of-file activities.  */

int at_eof;

/* True if note_mangling_alias should enqueue mangling aliases for
   later generation, rather than emitting them right away.  */

bool defer_mangling_aliases = true;


/* Return a member function type (a METHOD_TYPE), given FNTYPE (a
   FUNCTION_TYPE), CTYPE (class type), and QUALS (the cv-qualifiers
   that apply to the function).  */

tree
build_memfn_type (tree fntype, tree ctype, cp_cv_quals quals,
		  cp_ref_qualifier rqual)
{
  tree raises;
  tree attrs;
  int type_quals;
  bool late_return_type_p;

  if (fntype == error_mark_node || ctype == error_mark_node)
    return error_mark_node;

  gcc_assert (TREE_CODE (fntype) == FUNCTION_TYPE
	      || TREE_CODE (fntype) == METHOD_TYPE);

  /* `restrict' cannot apply to the implicit object parameter.  */
  type_quals = quals & ~TYPE_QUAL_RESTRICT;
  ctype = cp_build_qualified_type (ctype, type_quals);

  raises = TYPE_RAISES_EXCEPTIONS (fntype);
  attrs = TYPE_ATTRIBUTES (fntype);
  late_return_type_p = TYPE_HAS_LATE_RETURN_TYPE (fntype);
  fntype = build_method_type_directly (ctype, TREE_TYPE (fntype),
				       (TREE_CODE (fntype) == METHOD_TYPE
					? TREE_CHAIN (TYPE_ARG_TYPES (fntype))
					: TYPE_ARG_TYPES (fntype)));
  if (attrs)
    fntype = cp_build_type_attribute_variant (fntype, attrs);
  if (rqual)
    fntype = build_ref_qualified_type (fntype, rqual);
  if (raises)
    fntype = build_exception_variant (fntype, raises);
  if (late_return_type_p)
    TYPE_HAS_LATE_RETURN_TYPE (fntype) = 1;

  return fntype;
}

/* Return a variant of FNTYPE, a FUNCTION_TYPE or METHOD_TYPE, with its
   return type changed to NEW_RET.  */

tree
change_return_type (tree new_ret, tree fntype)
{
  tree newtype;
  tree args = TYPE_ARG_TYPES (fntype);
  tree raises = TYPE_RAISES_EXCEPTIONS (fntype);
  tree attrs = TYPE_ATTRIBUTES (fntype);
  bool late_return_type_p = TYPE_HAS_LATE_RETURN_TYPE (fntype);

  if (new_ret == error_mark_node)
    return fntype;

  if (same_type_p (new_ret, TREE_TYPE (fntype)))
    return fntype;

  if (TREE_CODE (fntype) == FUNCTION_TYPE)
    {
      newtype = build_function_type (new_ret, args);
      newtype = apply_memfn_quals (newtype,
				   type_memfn_quals (fntype),
				   type_memfn_rqual (fntype));
    }
  else
    newtype = build_method_type_directly
      (class_of_this_parm (fntype), new_ret, TREE_CHAIN (args));
  if (FUNCTION_REF_QUALIFIED (fntype))
    newtype = build_ref_qualified_type (newtype, type_memfn_rqual (fntype));
  if (raises)
    newtype = build_exception_variant (newtype, raises);
  if (attrs)
    newtype = cp_build_type_attribute_variant (newtype, attrs);
  if (late_return_type_p)
    TYPE_HAS_LATE_RETURN_TYPE (newtype) = 1;

  return newtype;
}

/* Build a PARM_DECL of FN with NAME and TYPE, and set DECL_ARG_TYPE
   appropriately.  */

tree
cp_build_parm_decl (tree fn, tree name, tree type)
{
  tree parm = build_decl (input_location,
			  PARM_DECL, name, type);
  DECL_CONTEXT (parm) = fn;

  /* DECL_ARG_TYPE is only used by the back end and the back end never
     sees templates.  */
  if (!processing_template_decl)
    DECL_ARG_TYPE (parm) = type_passed_as (type);

  return parm;
}

/* Returns a PARM_DECL of FN for a parameter of the indicated TYPE, with the
   indicated NAME.  */

tree
build_artificial_parm (tree fn, tree name, tree type)
{
  tree parm = cp_build_parm_decl (fn, name, type);
  DECL_ARTIFICIAL (parm) = 1;
  /* All our artificial parms are implicitly `const'; they cannot be
     assigned to.  */
  TREE_READONLY (parm) = 1;
  return parm;
}

/* Constructors for types with virtual baseclasses need an "in-charge" flag
   saying whether this constructor is responsible for initialization of
   virtual baseclasses or not.  All destructors also need this "in-charge"
   flag, which additionally determines whether or not the destructor should
   free the memory for the object.

   This function adds the "in-charge" flag to member function FN if
   appropriate.  It is called from grokclassfn and tsubst.
   FN must be either a constructor or destructor.

   The in-charge flag follows the 'this' parameter, and is followed by the
   VTT parm (if any), then the user-written parms.  */

void
maybe_retrofit_in_chrg (tree fn)
{
  tree basetype, arg_types, parms, parm, fntype;

  /* If we've already add the in-charge parameter don't do it again.  */
  if (DECL_HAS_IN_CHARGE_PARM_P (fn))
    return;

  /* When processing templates we can't know, in general, whether or
     not we're going to have virtual baseclasses.  */
  if (processing_template_decl)
    return;

  /* We don't need an in-charge parameter for constructors that don't
     have virtual bases.  */
  if (DECL_CONSTRUCTOR_P (fn)
      && !CLASSTYPE_VBASECLASSES (DECL_CONTEXT (fn)))
    return;

  arg_types = TYPE_ARG_TYPES (TREE_TYPE (fn));
  basetype = TREE_TYPE (TREE_VALUE (arg_types));
  arg_types = TREE_CHAIN (arg_types);

  parms = DECL_CHAIN (DECL_ARGUMENTS (fn));

  /* If this is a subobject constructor or destructor, our caller will
     pass us a pointer to our VTT.  */
  if (CLASSTYPE_VBASECLASSES (DECL_CONTEXT (fn)))
    {
      parm = build_artificial_parm (fn, vtt_parm_identifier, vtt_parm_type);

      /* First add it to DECL_ARGUMENTS between 'this' and the real args...  */
      DECL_CHAIN (parm) = parms;
      parms = parm;

      /* ...and then to TYPE_ARG_TYPES.  */
      arg_types = hash_tree_chain (vtt_parm_type, arg_types);

      DECL_HAS_VTT_PARM_P (fn) = 1;
    }

  /* Then add the in-charge parm (before the VTT parm).  */
  parm = build_artificial_parm (fn, in_charge_identifier, integer_type_node);
  DECL_CHAIN (parm) = parms;
  parms = parm;
  arg_types = hash_tree_chain (integer_type_node, arg_types);

  /* Insert our new parameter(s) into the list.  */
  DECL_CHAIN (DECL_ARGUMENTS (fn)) = parms;

  /* And rebuild the function type.  */
  fntype = build_method_type_directly (basetype, TREE_TYPE (TREE_TYPE (fn)),
				       arg_types);
  if (TYPE_RAISES_EXCEPTIONS (TREE_TYPE (fn)))
    fntype = build_exception_variant (fntype,
				      TYPE_RAISES_EXCEPTIONS (TREE_TYPE (fn)));
  if (TYPE_ATTRIBUTES (TREE_TYPE (fn)))
    fntype = (cp_build_type_attribute_variant
	      (fntype, TYPE_ATTRIBUTES (TREE_TYPE (fn))));
  TREE_TYPE (fn) = fntype;

  /* Now we've got the in-charge parameter.  */
  DECL_HAS_IN_CHARGE_PARM_P (fn) = 1;
}

/* Classes overload their constituent function names automatically.
   When a function name is declared in a record structure,
   its name is changed to it overloaded name.  Since names for
   constructors and destructors can conflict, we place a leading
   '$' for destructors.

   CNAME is the name of the class we are grokking for.

   FUNCTION is a FUNCTION_DECL.  It was created by `grokdeclarator'.

   FLAGS contains bits saying what's special about today's
   arguments.  DTOR_FLAG == DESTRUCTOR.

   If FUNCTION is a destructor, then we must add the `auto-delete' field
   as a second parameter.  There is some hair associated with the fact
   that we must "declare" this variable in the manner consistent with the
   way the rest of the arguments were declared.

   QUALS are the qualifiers for the this pointer.  */

void
grokclassfn (tree ctype, tree function, enum overload_flags flags)
{
  tree fn_name = DECL_NAME (function);

  /* Even within an `extern "C"' block, members get C++ linkage.  See
     [dcl.link] for details.  */
  SET_DECL_LANGUAGE (function, lang_cplusplus);

  if (fn_name == NULL_TREE)
    {
      error ("name missing for member function");
      fn_name = get_identifier ("<anonymous>");
      DECL_NAME (function) = fn_name;
    }

  DECL_CONTEXT (function) = ctype;

  if (flags == DTOR_FLAG)
    DECL_CXX_DESTRUCTOR_P (function) = 1;

  if (flags == DTOR_FLAG || DECL_CONSTRUCTOR_P (function))
    maybe_retrofit_in_chrg (function);
}

/* Create an ARRAY_REF, checking for the user doing things backwards
   along the way.  DECLTYPE_P is for N3276, as in the parser.  */

tree
grok_array_decl (location_t loc, tree array_expr, tree index_exp,
		 bool decltype_p)
{
  tree type;
  tree expr;
  tree orig_array_expr = array_expr;
  tree orig_index_exp = index_exp;
  tree overload = NULL_TREE;

  if (error_operand_p (array_expr) || error_operand_p (index_exp))
    return error_mark_node;

  if (processing_template_decl)
    {
      if (type_dependent_expression_p (array_expr)
	  || type_dependent_expression_p (index_exp))
	return build_min_nt_loc (loc, ARRAY_REF, array_expr, index_exp,
				 NULL_TREE, NULL_TREE);
      array_expr = build_non_dependent_expr (array_expr);
      index_exp = build_non_dependent_expr (index_exp);
    }

  type = TREE_TYPE (array_expr);
  gcc_assert (type);
  type = non_reference (type);

  /* If they have an `operator[]', use that.  */
  if (MAYBE_CLASS_TYPE_P (type)
      || MAYBE_CLASS_TYPE_P (TREE_TYPE (index_exp)))
    {
      tsubst_flags_t complain = tf_warning_or_error;
      if (decltype_p)
	complain |= tf_decltype;
      expr = build_new_op (loc, ARRAY_REF, LOOKUP_NORMAL, array_expr,
			   index_exp, NULL_TREE, &overload, complain);
    }
  else
    {
      tree p1, p2, i1, i2;

      /* Otherwise, create an ARRAY_REF for a pointer or array type.
	 It is a little-known fact that, if `a' is an array and `i' is
	 an int, you can write `i[a]', which means the same thing as
	 `a[i]'.  */
      if (TREE_CODE (type) == ARRAY_TYPE || VECTOR_TYPE_P (type))
	p1 = array_expr;
      else
	p1 = build_expr_type_conversion (WANT_POINTER, array_expr, false);

      if (TREE_CODE (TREE_TYPE (index_exp)) == ARRAY_TYPE)
	p2 = index_exp;
      else
	p2 = build_expr_type_conversion (WANT_POINTER, index_exp, false);

      i1 = build_expr_type_conversion (WANT_INT | WANT_ENUM, array_expr,
				       false);
      i2 = build_expr_type_conversion (WANT_INT | WANT_ENUM, index_exp,
				       false);

      /* Exactly one side must convert to a pointer and the other to an
	 integer; if both pairings succeed the subscript is ambiguous.  */
      if ((p1 && i2) && (i1 && p2))
	error ("ambiguous conversion for array subscript");

      if (p1 && i2)
	array_expr = p1, index_exp = i2;
      else if (i1 && p2)
	array_expr = p2, index_exp = i1;
      else
	{
	  error ("invalid types %<%T[%T]%> for array subscript",
		 type, TREE_TYPE (index_exp));
	  return error_mark_node;
	}

      if (array_expr == error_mark_node || index_exp == error_mark_node)
	error ("ambiguous conversion for array subscript");

      if (TREE_CODE (TREE_TYPE (array_expr)) == POINTER_TYPE)
	array_expr = mark_rvalue_use (array_expr);
      else
	array_expr = mark_lvalue_use_nonread (array_expr);
      index_exp = mark_rvalue_use (index_exp);
      expr = build_array_ref (input_location, array_expr, index_exp);
    }
  if (processing_template_decl && expr != error_mark_node)
    {
      if (overload != NULL_TREE)
	return (build_min_non_dep_op_overload
		(ARRAY_REF, expr, overload, orig_array_expr, orig_index_exp));

      return build_min_non_dep (ARRAY_REF, expr, orig_array_expr,
				orig_index_exp, NULL_TREE, NULL_TREE);
    }
  return expr;
}

/* Given the cast expression EXP, checking out its validity.
   Either return an error_mark_node if there was an unavoidable error,
   return a cast to void for trying to delete a pointer w/ the value 0,
   or return the call to delete.  If DOING_VEC is true, we handle things
   differently for doing an array delete.
   Implements ARM $5.3.4.  This is called from the parser.  */

tree
delete_sanity (tree exp, tree size, bool doing_vec, int use_global_delete,
	       tsubst_flags_t complain)
{
  tree t, type;

  if (exp == error_mark_node)
    return exp;

  if (processing_template_decl)
    {
      t = build_min (DELETE_EXPR, void_type_node, exp, size);
      DELETE_EXPR_USE_GLOBAL (t) = use_global_delete;
      DELETE_EXPR_USE_VEC (t) = doing_vec;
      TREE_SIDE_EFFECTS (t) = 1;
      return t;
    }

  /* An array can't have been allocated by new, so complain.  */
  if (TREE_CODE (TREE_TYPE (exp)) == ARRAY_TYPE)
    warning (0, "deleting array %q#E", exp);

  t = build_expr_type_conversion (WANT_POINTER, exp, true);

  if (t == NULL_TREE || t == error_mark_node)
    {
      error ("type %q#T argument given to %<delete%>, expected pointer",
	     TREE_TYPE (exp));
      return error_mark_node;
    }

  type = TREE_TYPE (t);

  /* As of Valley Forge, you can delete a pointer to const.  */

  /* You can't delete functions.  */
  if (TREE_CODE (TREE_TYPE (type)) == FUNCTION_TYPE)
    {
      error ("cannot delete a function.  Only pointer-to-objects are "
	     "valid arguments to %<delete%>");
      return error_mark_node;
    }

  /* Deleting ptr to void is undefined behavior [expr.delete/3].  */
  if (VOID_TYPE_P (TREE_TYPE (type)))
    {
      warning (OPT_Wdelete_incomplete, "deleting %qT is undefined", type);
      doing_vec = 0;
    }

  /* Deleting a pointer with the value zero is valid and has no effect;
     emit a no-op cast to void rather than a delete call.  */
  if (integer_zerop (t))
    return build1 (NOP_EXPR, void_type_node, t);

  if (doing_vec)
    return build_vec_delete (t, /*maxindex=*/NULL_TREE,
			     sfk_deleting_destructor,
			     use_global_delete, complain);
  else
    return build_delete (type, t, sfk_deleting_destructor,
			 LOOKUP_NORMAL, use_global_delete,
			 complain);
}

/* Report an error if the indicated template declaration is not the
   sort of thing that should be a member template.  */

void
check_member_template (tree tmpl)
{
  tree decl;

  gcc_assert (TREE_CODE (tmpl) == TEMPLATE_DECL);
  decl = DECL_TEMPLATE_RESULT (tmpl);

  if (TREE_CODE (decl) == FUNCTION_DECL
      || DECL_ALIAS_TEMPLATE_P (tmpl)
      || (TREE_CODE (decl) == TYPE_DECL
	  && MAYBE_CLASS_TYPE_P (TREE_TYPE (decl))))
    {
      /* The parser rejects template declarations in local classes
	 (with the exception of generic lambdas).  */
      gcc_assert (!current_function_decl || LAMBDA_FUNCTION_P (decl));
      /* The parser rejects any use of virtual in a function template.  */
      gcc_assert (!(TREE_CODE (decl) == FUNCTION_DECL
		    && DECL_VIRTUAL_P (decl)));

      /* The debug-information generating code doesn't know what to do
	 with member templates.  */
      DECL_IGNORED_P (tmpl) = 1;
    }
  else if (variable_template_p (tmpl))
    /* OK */;
  else
    error ("template declaration of %q#D", decl);
}

/* Sanity check: report error if this function FUNCTION is not
   really a member of the class (CTYPE) it is supposed to belong to.
   TEMPLATE_PARMS is used to specify the template parameters of a member
   template passed as FUNCTION_DECL. If the member template is passed as a
   TEMPLATE_DECL, it can be NULL since the parameters can be extracted
   from the declaration. If the function is not a function template, it
   must be NULL.
   It returns the original declaration for the function, NULL_TREE if
   no declaration was found, error_mark_node if an error was emitted.  */

tree
check_classfn (tree ctype, tree function, tree template_parms)
{
  if (DECL_USE_TEMPLATE (function)
      && !(TREE_CODE (function) == TEMPLATE_DECL
	   && DECL_TEMPLATE_SPECIALIZATION (function))
      && DECL_MEMBER_TEMPLATE_P (DECL_TI_TEMPLATE (function)))
    /* Since this is a specialization of a member template,
       we're not going to find the declaration in the class.
       For example, in:

	 struct S { template <typename T> void f(T); };
	 template <> void S::f(int);

       we're not going to find `S::f(int)', but there's no
       reason we should, either.  We let our callers know we didn't
       find the method, but we don't complain.  */
    return NULL_TREE;

  /* Basic sanity check: for a template function, the template parameters
     either were not passed, or they are the same of DECL_TEMPLATE_PARMS.  */
  if (TREE_CODE (function) == TEMPLATE_DECL)
    {
      if (template_parms
	  && !comp_template_parms (template_parms,
				   DECL_TEMPLATE_PARMS (function)))
	{
	  error ("template parameter lists provided don%'t match the "
		 "template parameters of %qD", function);
	  return error_mark_node;
	}
      template_parms = DECL_TEMPLATE_PARMS (function);
    }

  /* OK, is this a definition of a member template?  */
  bool is_template = (template_parms != NULL_TREE);

  /* [temp.mem]

     A destructor shall not be a member template.  */
  if (DECL_DESTRUCTOR_P (function) && is_template)
    {
      error ("destructor %qD declared as member template", function);
      return error_mark_node;
    }

  /* We must enter the scope here, because conversion operators are
     named by target type, and type equivalence relies on typenames
     resolving within the scope of CTYPE.  */
  tree pushed_scope = push_scope (ctype);
  tree matched = NULL_TREE;
  tree fns = get_class_binding (ctype, DECL_NAME (function));

  for (ovl_iterator iter (fns); !matched && iter; ++iter)
    {
      tree fndecl = *iter;

      /* A member template definition only matches a member template
	 declaration.  */
      if (is_template != (TREE_CODE (fndecl) == TEMPLATE_DECL))
	continue;

      if (!DECL_DECLARES_FUNCTION_P (fndecl))
	continue;

      tree p1 = TYPE_ARG_TYPES (TREE_TYPE (function));
      tree p2 = TYPE_ARG_TYPES (TREE_TYPE (fndecl));

      /* We cannot simply call decls_match because this doesn't work
	 for static member functions that are pretending to be
	 methods, and because the name may have been changed by
	 asm("new_name").  */

      /* Get rid of the this parameter on functions that become
	 static.  */
      if (DECL_STATIC_FUNCTION_P (fndecl)
	  && TREE_CODE (TREE_TYPE (function)) == METHOD_TYPE)
	p1 = TREE_CHAIN (p1);

      /* ref-qualifier or absence of same must match.
*/ if (type_memfn_rqual (TREE_TYPE (function)) != type_memfn_rqual (TREE_TYPE (fndecl))) continue; // Include constraints in the match. tree c1 = get_constraints (function); tree c2 = get_constraints (fndecl); /* While finding a match, same types and params are not enough if the function is versioned. Also check version ("target") attributes. */ if (same_type_p (TREE_TYPE (TREE_TYPE (function)), TREE_TYPE (TREE_TYPE (fndecl))) && compparms (p1, p2) && !targetm.target_option.function_versions (function, fndecl) && (!is_template || comp_template_parms (template_parms, DECL_TEMPLATE_PARMS (fndecl))) && equivalent_constraints (c1, c2) && (DECL_TEMPLATE_SPECIALIZATION (function) == DECL_TEMPLATE_SPECIALIZATION (fndecl)) && (!DECL_TEMPLATE_SPECIALIZATION (function) || (DECL_TI_TEMPLATE (function) == DECL_TI_TEMPLATE (fndecl)))) matched = fndecl; } if (!matched) { if (!COMPLETE_TYPE_P (ctype)) cxx_incomplete_type_error (function, ctype); else { if (DECL_CONV_FN_P (function)) fns = get_class_binding (ctype, conv_op_identifier); error_at (DECL_SOURCE_LOCATION (function), "no declaration matches %q#D", function); if (fns) print_candidates (fns); else if (DECL_CONV_FN_P (function)) inform (DECL_SOURCE_LOCATION (function), "no conversion operators declared"); else inform (DECL_SOURCE_LOCATION (function), "no functions named %qD", function); inform (DECL_SOURCE_LOCATION (TYPE_NAME (ctype)), "%#qT defined here", ctype); } matched = error_mark_node; } if (pushed_scope) pop_scope (pushed_scope); return matched; } /* DECL is a function with vague linkage. Remember it so that at the end of the translation unit we can decide whether or not to emit it. */ void note_vague_linkage_fn (tree decl) { if (processing_template_decl) return; DECL_DEFER_OUTPUT (decl) = 1; vec_safe_push (deferred_fns, decl); } /* As above, but for variable template instantiations. 
*/

/* Queue the variable template instantiation DECL so it is emitted with
   the other pending statics at end of translation unit.  */

void
note_variable_template_instantiation (tree decl)
{
  vec_safe_push (pending_statics, decl);
}

/* We have just processed the DECL, which is a static data member.
   The other parameters are as for cp_finish_decl.  */

void
finish_static_data_member_decl (tree decl,
				tree init, bool init_const_expr_p,
				tree asmspec_tree,
				int flags)
{
  DECL_CONTEXT (decl) = current_class_type;

  /* We cannot call pushdecl here, because that would fill in the
     TREE_CHAIN of our decl.  Instead, we modify cp_finish_decl to do
     the right thing, namely, to put this decl out straight away.  */

  if (! processing_template_decl)
    vec_safe_push (pending_statics, decl);

  /* Local classes and unnamed classes cannot have static data members;
     diagnose both, walking up enclosing class contexts for the latter.  */
  if (LOCAL_CLASS_P (current_class_type)
      /* We already complained about the template definition.  */
      && !DECL_TEMPLATE_INSTANTIATION (decl))
    permerror (input_location,
	       "local class %q#T shall not have static data member %q#D",
	       current_class_type, decl);
  else
    for (tree t = current_class_type; TYPE_P (t);
	 t = CP_TYPE_CONTEXT (t))
      if (TYPE_UNNAMED_P (t))
	{
	  if (permerror (DECL_SOURCE_LOCATION (decl),
			 "static data member %qD in unnamed class", decl))
	    inform (DECL_SOURCE_LOCATION (TYPE_NAME (t)),
		    "unnamed class defined here");
	  break;
	}

  DECL_IN_AGGR_P (decl) = 1;

  /* Remember that the array bound was omitted; it may be deduced from
     the initializer later.  */
  if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
      && TYPE_DOMAIN (TREE_TYPE (decl)) == NULL_TREE)
    SET_VAR_HAD_UNKNOWN_BOUND (decl);

  if (init)
    {
      /* Similarly to start_decl_1, we want to complete the type in order
	 to do the right thing in cp_apply_type_quals_to_decl, possibly
	 clear TYPE_QUAL_CONST (c++/65579).  */
      tree type = TREE_TYPE (decl) = complete_type (TREE_TYPE (decl));
      cp_apply_type_quals_to_decl (cp_type_quals (type), decl);
    }

  cp_finish_decl (decl, init, init_const_expr_p, asmspec_tree, flags);
}

/* DECLARATOR and DECLSPECS correspond to a class member.  The other
   parameters are as for cp_finish_decl.  Return the DECL for the
   class member declared.
*/ tree grokfield (const cp_declarator *declarator, cp_decl_specifier_seq *declspecs, tree init, bool init_const_expr_p, tree asmspec_tree, tree attrlist) { tree value; const char *asmspec = 0; int flags; tree name; if (init && TREE_CODE (init) == TREE_LIST && TREE_VALUE (init) == error_mark_node && TREE_CHAIN (init) == NULL_TREE) init = NULL_TREE; value = grokdeclarator (declarator, declspecs, FIELD, init != 0, &attrlist); if (! value || value == error_mark_node) /* friend or constructor went bad. */ return error_mark_node; if (TREE_TYPE (value) == error_mark_node) return value; if (TREE_CODE (value) == TYPE_DECL && init) { error ("typedef %qD is initialized (use decltype instead)", value); init = NULL_TREE; } /* Pass friendly classes back. */ if (value == void_type_node) return value; name = DECL_NAME (value); if (name != NULL_TREE) { if (TREE_CODE (name) == TEMPLATE_ID_EXPR) { error ("explicit template argument list not allowed"); return error_mark_node; } if (IDENTIFIER_POINTER (name)[0] == '_' && id_equal (name, "_vptr")) error ("member %qD conflicts with virtual function table field name", value); } /* Stash away type declarations. */ if (TREE_CODE (value) == TYPE_DECL) { DECL_NONLOCAL (value) = 1; DECL_CONTEXT (value) = current_class_type; if (attrlist) { int attrflags = 0; /* If this is a typedef that names the class for linkage purposes (7.1.3p8), apply any attributes directly to the type. */ if (OVERLOAD_TYPE_P (TREE_TYPE (value)) && value == TYPE_NAME (TYPE_MAIN_VARIANT (TREE_TYPE (value)))) attrflags = ATTR_FLAG_TYPE_IN_PLACE; cplus_decl_attributes (&value, attrlist, attrflags); } if (decl_spec_seq_has_spec_p (declspecs, ds_typedef) && TREE_TYPE (value) != error_mark_node && TYPE_NAME (TYPE_MAIN_VARIANT (TREE_TYPE (value))) != value) set_underlying_type (value); /* It's important that push_template_decl below follows set_underlying_type above so that the created template carries the properly set type of VALUE. 
*/ if (processing_template_decl) value = push_template_decl (value); record_locally_defined_typedef (value); return value; } int friendp = decl_spec_seq_has_spec_p (declspecs, ds_friend); if (!friendp && DECL_IN_AGGR_P (value)) { error ("%qD is already defined in %qT", value, DECL_CONTEXT (value)); return void_type_node; } if (asmspec_tree && asmspec_tree != error_mark_node) asmspec = TREE_STRING_POINTER (asmspec_tree); if (init) { if (TREE_CODE (value) == FUNCTION_DECL) { if (init == ridpointers[(int)RID_DELETE]) { if (friendp && decl_defined_p (value)) { error ("redefinition of %q#D", value); inform (DECL_SOURCE_LOCATION (value), "%q#D previously defined here", value); } else { DECL_DELETED_FN (value) = 1; DECL_DECLARED_INLINE_P (value) = 1; DECL_INITIAL (value) = error_mark_node; } } else if (init == ridpointers[(int)RID_DEFAULT]) { if (defaultable_fn_check (value)) { DECL_DEFAULTED_FN (value) = 1; DECL_INITIALIZED_IN_CLASS_P (value) = 1; DECL_DECLARED_INLINE_P (value) = 1; } } else if (TREE_CODE (init) == DEFAULT_ARG) error ("invalid initializer for member function %qD", value); else if (TREE_CODE (TREE_TYPE (value)) == METHOD_TYPE) { if (integer_zerop (init)) DECL_PURE_VIRTUAL_P (value) = 1; else if (error_operand_p (init)) ; /* An error has already been reported. */ else error ("invalid initializer for member function %qD", value); } else { gcc_assert (TREE_CODE (TREE_TYPE (value)) == FUNCTION_TYPE); if (friendp) error ("initializer specified for friend function %qD", value); else error ("initializer specified for static member function %qD", value); } } else if (TREE_CODE (value) == FIELD_DECL) /* C++11 NSDMI, keep going. */; else if (!VAR_P (value)) gcc_unreachable (); } /* Pass friend decls back. */ if ((TREE_CODE (value) == FUNCTION_DECL || TREE_CODE (value) == TEMPLATE_DECL) && DECL_CONTEXT (value) != current_class_type) return value; /* Need to set this before push_template_decl. 
*/ if (VAR_P (value)) DECL_CONTEXT (value) = current_class_type; if (processing_template_decl && VAR_OR_FUNCTION_DECL_P (value)) { value = push_template_decl (value); if (error_operand_p (value)) return error_mark_node; } if (attrlist) cplus_decl_attributes (&value, attrlist, 0); if (init && DIRECT_LIST_INIT_P (init)) flags = LOOKUP_NORMAL; else flags = LOOKUP_IMPLICIT; switch (TREE_CODE (value)) { case VAR_DECL: finish_static_data_member_decl (value, init, init_const_expr_p, asmspec_tree, flags); return value; case FIELD_DECL: if (asmspec) error ("%<asm%> specifiers are not permitted on non-static data members"); if (DECL_INITIAL (value) == error_mark_node) init = error_mark_node; cp_finish_decl (value, init, /*init_const_expr_p=*/false, NULL_TREE, flags); DECL_IN_AGGR_P (value) = 1; return value; case FUNCTION_DECL: if (asmspec) set_user_assembler_name (value, asmspec); cp_finish_decl (value, /*init=*/NULL_TREE, /*init_const_expr_p=*/false, asmspec_tree, flags); /* Pass friends back this way. */ if (DECL_FRIEND_P (value)) return void_type_node; DECL_IN_AGGR_P (value) = 1; return value; default: gcc_unreachable (); } return NULL_TREE; } /* Like `grokfield', but for bitfields. WIDTH is the width of the bitfield, a constant expression. The other parameters are as for grokfield. */ tree grokbitfield (const cp_declarator *declarator, cp_decl_specifier_seq *declspecs, tree width, tree init, tree attrlist) { tree value = grokdeclarator (declarator, declspecs, BITFIELD, init != NULL_TREE, &attrlist); if (value == error_mark_node) return NULL_TREE; /* friends went bad. */ if (TREE_TYPE (value) == error_mark_node) return value; /* Pass friendly classes back. 
*/ if (VOID_TYPE_P (value)) return void_type_node; if (!INTEGRAL_OR_ENUMERATION_TYPE_P (TREE_TYPE (value)) && (POINTER_TYPE_P (value) || !dependent_type_p (TREE_TYPE (value)))) { error ("bit-field %qD with non-integral type", value); return error_mark_node; } if (TREE_CODE (value) == TYPE_DECL) { error ("cannot declare %qD to be a bit-field type", value); return NULL_TREE; } /* Usually, finish_struct_1 catches bitfields with invalid types. But, in the case of bitfields with function type, we confuse ourselves into thinking they are member functions, so we must check here. */ if (TREE_CODE (value) == FUNCTION_DECL) { error ("cannot declare bit-field %qD with function type", DECL_NAME (value)); return NULL_TREE; } if (width && TYPE_WARN_IF_NOT_ALIGN (TREE_TYPE (value))) { error ("cannot declare bit-field %qD with %<warn_if_not_aligned%> type", DECL_NAME (value)); return NULL_TREE; } if (DECL_IN_AGGR_P (value)) { error ("%qD is already defined in the class %qT", value, DECL_CONTEXT (value)); return void_type_node; } if (TREE_STATIC (value)) { error ("static member %qD cannot be a bit-field", value); return NULL_TREE; } int flags = LOOKUP_IMPLICIT; if (init && DIRECT_LIST_INIT_P (init)) flags = LOOKUP_NORMAL; cp_finish_decl (value, init, false, NULL_TREE, flags); if (width != error_mark_node) { /* The width must be an integer type. */ if (!type_dependent_expression_p (width) && !INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P (TREE_TYPE (width))) error ("width of bit-field %qD has non-integral type %qT", value, TREE_TYPE (width)); else { /* Temporarily stash the width in DECL_BIT_FIELD_REPRESENTATIVE. check_bitfield_decl picks it from there later and sets DECL_SIZE accordingly. 
*/ DECL_BIT_FIELD_REPRESENTATIVE (value) = width; SET_DECL_C_BIT_FIELD (value); } } DECL_IN_AGGR_P (value) = 1; if (attrlist) cplus_decl_attributes (&value, attrlist, /*flags=*/0); return value; } /* Returns true iff ATTR is an attribute which needs to be applied at instantiation time rather than template definition time. */ static bool is_late_template_attribute (tree attr, tree decl) { tree name = get_attribute_name (attr); tree args = TREE_VALUE (attr); const struct attribute_spec *spec = lookup_attribute_spec (name); tree arg; if (!spec) /* Unknown attribute. */ return false; /* Attribute weak handling wants to write out assembly right away. */ if (is_attribute_p ("weak", name)) return true; /* Attributes used and unused are applied directly to typedefs for the benefit of maybe_warn_unused_local_typedefs. */ if (TREE_CODE (decl) == TYPE_DECL && (is_attribute_p ("unused", name) || is_attribute_p ("used", name))) return false; /* Attribute tls_model wants to modify the symtab. */ if (is_attribute_p ("tls_model", name)) return true; /* #pragma omp declare simd attribute needs to be always deferred. */ if (flag_openmp && is_attribute_p ("omp declare simd", name)) return true; /* An attribute pack is clearly dependent. */ if (args && PACK_EXPANSION_P (args)) return true; /* If any of the arguments are dependent expressions, we can't evaluate the attribute until instantiation time. */ for (arg = args; arg; arg = TREE_CHAIN (arg)) { tree t = TREE_VALUE (arg); /* If the first attribute argument is an identifier, only consider second and following arguments. Attributes like mode, format, cleanup and several target specific attributes aren't late just because they have an IDENTIFIER_NODE as first argument. 
*/ if (arg == args && attribute_takes_identifier_p (name) && identifier_p (t)) continue; if (value_dependent_expression_p (t) || type_dependent_expression_p (t)) return true; } if (TREE_CODE (decl) == TYPE_DECL || TYPE_P (decl) || spec->type_required) { tree type = TYPE_P (decl) ? decl : TREE_TYPE (decl); /* We can't apply any attributes to a completely unknown type until instantiation time. */ enum tree_code code = TREE_CODE (type); if (code == TEMPLATE_TYPE_PARM || code == BOUND_TEMPLATE_TEMPLATE_PARM || code == TYPENAME_TYPE) return true; /* Also defer most attributes on dependent types. This is not necessary in all cases, but is the better default. */ else if (dependent_type_p (type) /* But some attributes specifically apply to templates. */ && !is_attribute_p ("abi_tag", name) && !is_attribute_p ("deprecated", name) && !is_attribute_p ("visibility", name)) return true; else return false; } else return false; } /* ATTR_P is a list of attributes. Remove any attributes which need to be applied at instantiation time and return them. If IS_DEPENDENT is true, the declaration itself is dependent, so all attributes should be applied at instantiation time. */ static tree splice_template_attributes (tree *attr_p, tree decl) { tree *p = attr_p; tree late_attrs = NULL_TREE; tree *q = &late_attrs; if (!p) return NULL_TREE; for (; *p; ) { if (is_late_template_attribute (*p, decl)) { ATTR_IS_DEPENDENT (*p) = 1; *q = *p; *p = TREE_CHAIN (*p); q = &TREE_CHAIN (*q); *q = NULL_TREE; } else p = &TREE_CHAIN (*p); } return late_attrs; } /* Remove any late attributes from the list in ATTR_P and attach them to DECL_P. 
*/

/* Remove any late (instantiation-time) attributes from *ATTR_P and
   attach them to *DECL_P instead, marking them ATTR_IS_DEPENDENT so
   they are re-applied at instantiation.  FLAGS is the decl_attributes
   flag word (checked for ATTR_FLAG_TYPE_IN_PLACE).  */

static void
save_template_attributes (tree *attr_p, tree *decl_p, int flags)
{
  tree *q;

  if (attr_p && *attr_p == error_mark_node)
    return;

  tree late_attrs = splice_template_attributes (attr_p, *decl_p);
  if (!late_attrs)
    return;

  if (DECL_P (*decl_p))
    q = &DECL_ATTRIBUTES (*decl_p);
  else
    q = &TYPE_ATTRIBUTES (*decl_p);

  tree old_attrs = *q;

  /* Merge the late attributes at the beginning with the attribute
     list.  */
  late_attrs = merge_attributes (late_attrs, *q);
  /* For a type that is not being modified in place, attach the merged
     list to a (possibly new) variant rather than mutating the shared
     type node.  */
  if (*q != late_attrs
      && !DECL_P (*decl_p)
      && !(flags & ATTR_FLAG_TYPE_IN_PLACE))
    {
      if (!dependent_type_p (*decl_p))
	*decl_p = cp_build_type_attribute_variant (*decl_p, late_attrs);
      else
	{
	  *decl_p = build_variant_type_copy (*decl_p);
	  TYPE_ATTRIBUTES (*decl_p) = late_attrs;
	}
    }
  else
    *q = late_attrs;

  if (!DECL_P (*decl_p) && *decl_p == TYPE_MAIN_VARIANT (*decl_p))
    {
      /* We've added new attributes directly to the main variant, so
	 now we need to update all of the other variants to include
	 these new attributes.  */
      tree variant;
      for (variant = TYPE_NEXT_VARIANT (*decl_p); variant;
	   variant = TYPE_NEXT_VARIANT (variant))
	{
	  gcc_assert (TYPE_ATTRIBUTES (variant) == old_attrs);
	  TYPE_ATTRIBUTES (variant) = TYPE_ATTRIBUTES (*decl_p);
	}
    }
}

/* True if ATTRS contains any dependent attributes that affect type
   identity.  */

bool
any_dependent_type_attributes_p (tree attrs)
{
  for (tree a = attrs; a; a = TREE_CHAIN (a))
    if (ATTR_IS_DEPENDENT (a))
      {
	const attribute_spec *as = lookup_attribute_spec (TREE_PURPOSE (a));
	if (as && as->affects_type_identity)
	  return true;
      }
  return false;
}

/* Return true iff ATTRS are acceptable attributes to be applied in-place
   to a typedef which gives a previously unnamed class or enum a name for
   linkage purposes.  */

bool
attributes_naming_typedef_ok (tree attrs)
{
  for (; attrs; attrs = TREE_CHAIN (attrs))
    {
      tree name = get_attribute_name (attrs);
      /* vector_size changes the type itself, so it cannot safely be
	 folded into the linkage-naming typedef.  */
      if (is_attribute_p ("vector_size", name))
	return false;
    }
  return true;
}

/* Like reconstruct_complex_type, but handle also template trees.
*/ tree cp_reconstruct_complex_type (tree type, tree bottom) { tree inner, outer; bool late_return_type_p = false; if (TYPE_PTR_P (type)) { inner = cp_reconstruct_complex_type (TREE_TYPE (type), bottom); outer = build_pointer_type_for_mode (inner, TYPE_MODE (type), TYPE_REF_CAN_ALIAS_ALL (type)); } else if (TREE_CODE (type) == REFERENCE_TYPE) { inner = cp_reconstruct_complex_type (TREE_TYPE (type), bottom); outer = build_reference_type_for_mode (inner, TYPE_MODE (type), TYPE_REF_CAN_ALIAS_ALL (type)); } else if (TREE_CODE (type) == ARRAY_TYPE) { inner = cp_reconstruct_complex_type (TREE_TYPE (type), bottom); outer = build_cplus_array_type (inner, TYPE_DOMAIN (type)); /* Don't call cp_build_qualified_type on ARRAY_TYPEs, the element type qualification will be handled by the recursive cp_reconstruct_complex_type call and cp_build_qualified_type for ARRAY_TYPEs changes the element type. */ return outer; } else if (TREE_CODE (type) == FUNCTION_TYPE) { late_return_type_p = TYPE_HAS_LATE_RETURN_TYPE (type); inner = cp_reconstruct_complex_type (TREE_TYPE (type), bottom); outer = build_function_type (inner, TYPE_ARG_TYPES (type)); outer = apply_memfn_quals (outer, type_memfn_quals (type), type_memfn_rqual (type)); } else if (TREE_CODE (type) == METHOD_TYPE) { late_return_type_p = TYPE_HAS_LATE_RETURN_TYPE (type); inner = cp_reconstruct_complex_type (TREE_TYPE (type), bottom); /* The build_method_type_directly() routine prepends 'this' to argument list, so we must compensate by getting rid of it. 
*/ outer = build_method_type_directly (class_of_this_parm (type), inner, TREE_CHAIN (TYPE_ARG_TYPES (type))); } else if (TREE_CODE (type) == OFFSET_TYPE) { inner = cp_reconstruct_complex_type (TREE_TYPE (type), bottom); outer = build_offset_type (TYPE_OFFSET_BASETYPE (type), inner); } else return bottom; if (TYPE_ATTRIBUTES (type)) outer = cp_build_type_attribute_variant (outer, TYPE_ATTRIBUTES (type)); outer = cp_build_qualified_type (outer, cp_type_quals (type)); if (late_return_type_p) TYPE_HAS_LATE_RETURN_TYPE (outer) = 1; return outer; } /* Replaces any constexpr expression that may be into the attributes arguments with their reduced value. */ static void cp_check_const_attributes (tree attributes) { if (attributes == error_mark_node) return; tree attr; for (attr = attributes; attr; attr = TREE_CHAIN (attr)) { tree arg; for (arg = TREE_VALUE (attr); arg; arg = TREE_CHAIN (arg)) { tree expr = TREE_VALUE (arg); if (EXPR_P (expr)) TREE_VALUE (arg) = fold_non_dependent_expr (expr); } } } /* Return true if TYPE is an OpenMP mappable type. */ bool cp_omp_mappable_type (tree type) { /* Mappable type has to be complete. */ if (type == error_mark_node || !COMPLETE_TYPE_P (type)) return false; /* Arrays have mappable type if the elements have mappable type. */ while (TREE_CODE (type) == ARRAY_TYPE) type = TREE_TYPE (type); /* A mappable type cannot contain virtual members. */ if (CLASS_TYPE_P (type) && CLASSTYPE_VTABLES (type)) return false; /* All data members must be non-static. */ if (CLASS_TYPE_P (type)) { tree field; for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) if (VAR_P (field)) return false; /* All fields must have mappable types. */ else if (TREE_CODE (field) == FIELD_DECL && !cp_omp_mappable_type (TREE_TYPE (field))) return false; } return true; } /* Return the last pushed declaration for the symbol DECL or NULL when no such declaration exists. 
*/

/* Return the last pushed declaration matching DECL in DECL's own scope,
   or NULL_TREE if none is found.  Used to give decl_attributes a prior
   declaration to merge against.  */

static tree
find_last_decl (tree decl)
{
  tree last_decl = NULL_TREE;

  if (tree name = DECL_P (decl) ? DECL_NAME (decl) : NULL_TREE)
    {
      /* Look up the declaration in its scope.  */
      tree pushed_scope = NULL_TREE;
      if (tree ctype = DECL_CONTEXT (decl))
	pushed_scope = push_scope (ctype);

      last_decl = lookup_name (name);

      if (pushed_scope)
	pop_scope (pushed_scope);

      /* The declaration may be a member conversion operator
	 or a bunch of overloads (handle the latter below).  */
      if (last_decl && BASELINK_P (last_decl))
	last_decl = BASELINK_FUNCTIONS (last_decl);
    }

  if (!last_decl)
    return NULL_TREE;

  if (DECL_P (last_decl) || TREE_CODE (last_decl) == OVERLOAD)
    {
      /* A set of overloads of the same function.  Pick the one whose
	 signature actually matches DECL.  */
      for (lkp_iterator iter (last_decl); iter; ++iter)
	{
	  if (TREE_CODE (*iter) == OVERLOAD)
	    continue;

	  if (decls_match (decl, *iter, /*record_decls=*/false))
	    return *iter;
	}
      return NULL_TREE;
    }

  return NULL_TREE;
}

/* Like decl_attributes, but handle C++ complexity.  */

void
cplus_decl_attributes (tree *decl, tree attributes, int flags)
{
  if (*decl == NULL_TREE || *decl == void_type_node
      || *decl == error_mark_node)
    return;

  /* Add implicit "omp declare target" attribute if requested.
*/ if (scope_chain->omp_declare_target_attribute && ((VAR_P (*decl) && (TREE_STATIC (*decl) || DECL_EXTERNAL (*decl))) || TREE_CODE (*decl) == FUNCTION_DECL)) { if (VAR_P (*decl) && DECL_CLASS_SCOPE_P (*decl)) error ("%q+D static data member inside of declare target directive", *decl); else if (VAR_P (*decl) && (processing_template_decl || !cp_omp_mappable_type (TREE_TYPE (*decl)))) attributes = tree_cons (get_identifier ("omp declare target implicit"), NULL_TREE, attributes); else attributes = tree_cons (get_identifier ("omp declare target"), NULL_TREE, attributes); } if (processing_template_decl) { if (check_for_bare_parameter_packs (attributes)) return; save_template_attributes (&attributes, decl, flags); } cp_check_const_attributes (attributes); if (TREE_CODE (*decl) == TEMPLATE_DECL) decl = &DECL_TEMPLATE_RESULT (*decl); if (TREE_TYPE (*decl) && TYPE_PTRMEMFUNC_P (TREE_TYPE (*decl))) { attributes = decl_attributes (decl, attributes, flags | ATTR_FLAG_FUNCTION_NEXT); decl_attributes (&TYPE_PTRMEMFUNC_FN_TYPE_RAW (TREE_TYPE (*decl)), attributes, flags); } else { tree last_decl = find_last_decl (*decl); decl_attributes (decl, attributes, flags, last_decl); } if (TREE_CODE (*decl) == TYPE_DECL) SET_IDENTIFIER_TYPE_VALUE (DECL_NAME (*decl), TREE_TYPE (*decl)); /* Propagate deprecation out to the template. */ if (TREE_DEPRECATED (*decl)) if (tree ti = get_template_info (*decl)) { tree tmpl = TI_TEMPLATE (ti); tree pattern = (TYPE_P (*decl) ? TREE_TYPE (tmpl) : DECL_TEMPLATE_RESULT (tmpl)); if (*decl == pattern) TREE_DEPRECATED (tmpl) = true; } } /* Walks through the namespace- or function-scope anonymous union OBJECT, with the indicated TYPE, building appropriate VAR_DECLs. Returns one of the fields for use in the mangled name. */ static tree build_anon_union_vars (tree type, tree object) { tree main_decl = NULL_TREE; tree field; /* Rather than write the code to handle the non-union case, just give an error. 
*/ if (TREE_CODE (type) != UNION_TYPE) { error ("anonymous struct not inside named type"); return error_mark_node; } for (field = TYPE_FIELDS (type); field != NULL_TREE; field = DECL_CHAIN (field)) { tree decl; tree ref; if (DECL_ARTIFICIAL (field)) continue; if (TREE_CODE (field) != FIELD_DECL) { permerror (DECL_SOURCE_LOCATION (field), "%q#D invalid; an anonymous union can only " "have non-static data members", field); continue; } if (TREE_PRIVATE (field)) permerror (DECL_SOURCE_LOCATION (field), "private member %q#D in anonymous union", field); else if (TREE_PROTECTED (field)) permerror (DECL_SOURCE_LOCATION (field), "protected member %q#D in anonymous union", field); if (processing_template_decl) ref = build_min_nt_loc (UNKNOWN_LOCATION, COMPONENT_REF, object, DECL_NAME (field), NULL_TREE); else ref = build_class_member_access_expr (object, field, NULL_TREE, false, tf_warning_or_error); if (DECL_NAME (field)) { tree base; decl = build_decl (input_location, VAR_DECL, DECL_NAME (field), TREE_TYPE (field)); DECL_ANON_UNION_VAR_P (decl) = 1; DECL_ARTIFICIAL (decl) = 1; base = get_base_address (object); TREE_PUBLIC (decl) = TREE_PUBLIC (base); TREE_STATIC (decl) = TREE_STATIC (base); DECL_EXTERNAL (decl) = DECL_EXTERNAL (base); SET_DECL_VALUE_EXPR (decl, ref); DECL_HAS_VALUE_EXPR_P (decl) = 1; decl = pushdecl (decl); } else if (ANON_AGGR_TYPE_P (TREE_TYPE (field))) decl = build_anon_union_vars (TREE_TYPE (field), ref); else decl = 0; if (main_decl == NULL_TREE) main_decl = decl; } return main_decl; } /* Finish off the processing of a UNION_TYPE structure. If the union is an anonymous union, then all members must be laid out together. PUBLIC_P is nonzero if this union is not declared static. */ void finish_anon_union (tree anon_union_decl) { tree type; tree main_decl; bool public_p; if (anon_union_decl == error_mark_node) return; type = TREE_TYPE (anon_union_decl); public_p = TREE_PUBLIC (anon_union_decl); /* The VAR_DECL's context is the same as the TYPE's context. 
*/
  DECL_CONTEXT (anon_union_decl) = DECL_CONTEXT (TYPE_NAME (type));

  if (TYPE_FIELDS (type) == NULL_TREE)
    return;

  if (public_p)
    {
      error ("namespace-scope anonymous aggregates must be static");
      return;
    }

  main_decl = build_anon_union_vars (type, anon_union_decl);
  if (main_decl == error_mark_node)
    return;
  if (main_decl == NULL_TREE)
    {
      pedwarn (input_location, 0, "anonymous union with no members");
      return;
    }

  if (!processing_template_decl)
    {
      /* Use main_decl to set the mangled name.  */
      DECL_NAME (anon_union_decl) = DECL_NAME (main_decl);
      maybe_commonize_var (anon_union_decl);
      if (TREE_STATIC (anon_union_decl) || DECL_EXTERNAL (anon_union_decl))
	mangle_decl (anon_union_decl);
      /* The name was only borrowed for mangling; the union itself
	 stays anonymous.  */
      DECL_NAME (anon_union_decl) = NULL_TREE;
    }

  pushdecl (anon_union_decl);
  cp_finish_decl (anon_union_decl, NULL_TREE, false, NULL_TREE, 0);
}

/* Auxiliary functions to make type signatures for
   `operator new' and `operator delete' correspond to
   what compiler will be expecting.  */

/* Check and, where possible, repair the signature of an operator new
   with function type TYPE: it must return void* and take size_t as its
   first parameter (with no default argument).  Returns the possibly
   rebuilt function type.  E encodes what needs fixing: 1 = return type
   only, 2 = also prepend the size_t parameter.  */

tree
coerce_new_type (tree type)
{
  int e = 0;
  tree args = TYPE_ARG_TYPES (type);

  gcc_assert (TREE_CODE (type) == FUNCTION_TYPE);

  if (!same_type_p (TREE_TYPE (type), ptr_type_node))
    {
      e = 1;
      error ("%<operator new%> must return type %qT", ptr_type_node);
    }

  if (args && args != void_list_node)
    {
      if (TREE_PURPOSE (args))
	{
	  /* [basic.stc.dynamic.allocation]

	     The first parameter shall not have an associated default
	     argument.  */
	  error ("the first parameter of %<operator new%> cannot "
		 "have a default argument");
	  /* Throw away the default argument.  */
	  TREE_PURPOSE (args) = NULL_TREE;
	}

      if (!same_type_p (TREE_VALUE (args), size_type_node))
	{
	  e = 2;
	  args = TREE_CHAIN (args);
	}
    }
  else
    e = 2;

  if (e == 2)
    permerror (input_location,
	       "%<operator new%> takes type %<size_t%> (%qT) "
	       "as first parameter", size_type_node);

  switch (e)
    {
      case 2:
	args = tree_cons (NULL_TREE, size_type_node, args);
	/* Fall through.  */
      case 1:
	type = build_exception_variant
		(build_function_type (ptr_type_node, args),
		 TYPE_RAISES_EXCEPTIONS (type));
	/* Fall through.
*/ default:; } return type; } tree coerce_delete_type (tree type) { int e = 0; tree args = TYPE_ARG_TYPES (type); gcc_assert (TREE_CODE (type) == FUNCTION_TYPE); if (!same_type_p (TREE_TYPE (type), void_type_node)) { e = 1; error ("%<operator delete%> must return type %qT", void_type_node); } if (!args || args == void_list_node || !same_type_p (TREE_VALUE (args), ptr_type_node)) { e = 2; if (args && args != void_list_node) args = TREE_CHAIN (args); error ("%<operator delete%> takes type %qT as first parameter", ptr_type_node); } switch (e) { case 2: args = tree_cons (NULL_TREE, ptr_type_node, args); /* Fall through. */ case 1: type = build_exception_variant (build_function_type (void_type_node, args), TYPE_RAISES_EXCEPTIONS (type)); /* Fall through. */ default:; } return type; } /* DECL is a VAR_DECL for a vtable: walk through the entries in the vtable and mark them as needed. */ static void mark_vtable_entries (tree decl) { tree fnaddr; unsigned HOST_WIDE_INT idx; /* It's OK for the vtable to refer to deprecated virtual functions. */ warning_sentinel w(warn_deprecated_decl); FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (DECL_INITIAL (decl)), idx, fnaddr) { tree fn; STRIP_NOPS (fnaddr); if (TREE_CODE (fnaddr) != ADDR_EXPR && TREE_CODE (fnaddr) != FDESC_EXPR) /* This entry is an offset: a virtual base class offset, a virtual call offset, an RTTI offset, etc. */ continue; fn = TREE_OPERAND (fnaddr, 0); TREE_ADDRESSABLE (fn) = 1; /* When we don't have vcall offsets, we output thunks whenever we output the vtables that contain them. With vcall offsets, we know all the thunks we'll need when we emit a virtual function, so we emit the thunks there instead. */ if (DECL_THUNK_P (fn)) use_thunk (fn, /*emit_p=*/0); /* Set the location, as marking the function could cause instantiation. We do not need to preserve the incoming location, as we're called from c_parse_final_cleanups, which takes care of that. 
*/ input_location = DECL_SOURCE_LOCATION (fn); mark_used (fn); } } /* Set DECL up to have the closest approximation of "initialized common" linkage available. */ void comdat_linkage (tree decl) { if (flag_weak) make_decl_one_only (decl, cxx_comdat_group (decl)); else if (TREE_CODE (decl) == FUNCTION_DECL || (VAR_P (decl) && DECL_ARTIFICIAL (decl))) /* We can just emit function and compiler-generated variables statically; having multiple copies is (for the most part) only a waste of space. There are two correctness issues, however: the address of a template instantiation with external linkage should be the same, independent of what translation unit asks for the address, and this will not hold when we emit multiple copies of the function. However, there's little else we can do. Also, by default, the typeinfo implementation assumes that there will be only one copy of the string used as the name for each type. Therefore, if weak symbols are unavailable, the run-time library should perform a more conservative check; it should perform a string comparison, rather than an address comparison. */ TREE_PUBLIC (decl) = 0; else { /* Static data member template instantiations, however, cannot have multiple copies. */ if (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node) DECL_COMMON (decl) = 1; else if (EMPTY_CONSTRUCTOR_P (DECL_INITIAL (decl))) { DECL_COMMON (decl) = 1; DECL_INITIAL (decl) = error_mark_node; } else if (!DECL_EXPLICIT_INSTANTIATION (decl)) { /* We can't do anything useful; leave vars for explicit instantiation. */ DECL_EXTERNAL (decl) = 1; DECL_NOT_REALLY_EXTERN (decl) = 0; } } if (TREE_PUBLIC (decl)) DECL_COMDAT (decl) = 1; } /* For win32 we also want to put explicit instantiations in linkonce sections, so that they will be merged with implicit instantiations; otherwise we get duplicate symbol errors. For Darwin we do not want explicit instantiations to be linkonce. 
*/

void
maybe_make_one_only (tree decl)
{
  /* We used to say that this was not necessary on targets that support weak
     symbols, because the implicit instantiations will defer to the explicit
     one.  However, that's not actually the case in SVR4; a strong definition
     after a weak one is an error.  Also, not making explicit
     instantiations one_only means that we can end up with two copies of
     some template instantiations.  */
  if (! flag_weak)
    return;

  /* We can't set DECL_COMDAT on functions, or cp_finish_file will think
     we can get away with not emitting them if they aren't used.  We need
     to for variables so that cp_finish_decl will update their linkage,
     because their DECL_INITIAL may not have been set properly yet.  */

  if (!TARGET_WEAK_NOT_IN_ARCHIVE_TOC
      || (! DECL_EXPLICIT_INSTANTIATION (decl)
	  && ! DECL_TEMPLATE_SPECIALIZATION (decl)))
    {
      make_decl_one_only (decl, cxx_comdat_group (decl));

      if (VAR_P (decl))
	{
	  varpool_node *node = varpool_node::get_create (decl);
	  DECL_COMDAT (decl) = 1;
	  /* Mark it needed so we don't forget to emit it.  */
	  node->forced_by_abi = true;
	  TREE_USED (decl) = 1;
	}
    }
}

/* Returns true iff DECL, a FUNCTION_DECL or VAR_DECL, has vague linkage.
   This predicate will give the right answer during parsing of the
   function, which other tests may not.  */

bool
vague_linkage_p (tree decl)
{
  if (!TREE_PUBLIC (decl))
    {
      /* maybe_thunk_body clears TREE_PUBLIC and DECL_ABSTRACT_P on the
	 maybe-in-charge 'tor variants; in that case we need to check one of
	 the "clones" for the real linkage.  But only in that case; before
	 maybe_clone_body we haven't yet copied the linkage to the clones.  */
      if ((DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P (decl)
	   || DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (decl))
	  && !DECL_ABSTRACT_P (decl)
	  && DECL_CHAIN (decl)
	  && DECL_CLONED_FUNCTION_P (DECL_CHAIN (decl)))
	return vague_linkage_p (DECL_CHAIN (decl));

      gcc_checking_assert (!DECL_COMDAT (decl));
      return false;
    }
  /* Unfortunately, import_export_decl has not always been called
     before the function is processed, so we cannot simply check
     DECL_COMDAT.  */
  if (DECL_COMDAT (decl)
      || (TREE_CODE (decl) == FUNCTION_DECL
	  && DECL_DECLARED_INLINE_P (decl))
      || (DECL_LANG_SPECIFIC (decl)
	  && DECL_TEMPLATE_INSTANTIATION (decl))
      || (VAR_P (decl) && DECL_INLINE_VAR_P (decl)))
    return true;
  else if (DECL_FUNCTION_SCOPE_P (decl))
    /* A local static in an inline effectively has vague linkage.  */
    return (TREE_STATIC (decl)
	    && vague_linkage_p (DECL_CONTEXT (decl)));
  else
    return false;
}

/* Determine whether or not we want to specifically import or export CTYPE,
   using various heuristics.  */

static void
import_export_class (tree ctype)
{
  /* -1 for imported, 1 for exported.  */
  int import_export = 0;

  /* It only makes sense to call this function at EOF.  The reason is
     that this function looks at whether or not the first non-inline
     non-abstract virtual member function has been defined in this
     translation unit.  But, we can't possibly know that until we've
     seen the entire translation unit.  */
  gcc_assert (at_eof);

  if (CLASSTYPE_INTERFACE_KNOWN (ctype))
    return;

  /* If MULTIPLE_SYMBOL_SPACES is set and we saw a #pragma interface,
     we will have CLASSTYPE_INTERFACE_ONLY set but not
     CLASSTYPE_INTERFACE_KNOWN.  In that case, we don't want to use this
     heuristic because someone will supply a #pragma implementation
     elsewhere, and deducing it here would produce a conflict.  */
  if (CLASSTYPE_INTERFACE_ONLY (ctype))
    return;

  if (lookup_attribute ("dllimport", TYPE_ATTRIBUTES (ctype)))
    import_export = -1;
  else if (lookup_attribute ("dllexport", TYPE_ATTRIBUTES (ctype)))
    import_export = 1;
  else if (CLASSTYPE_IMPLICIT_INSTANTIATION (ctype)
	   && !flag_implicit_templates)
    /* For a template class, without -fimplicit-templates, check the
       repository.  If the virtual table is assigned to this
       translation unit, then export the class; otherwise, import
       it.  */
    import_export = repo_export_class_p (ctype) ? 1 : -1;
  else if (TYPE_POLYMORPHIC_P (ctype))
    {
      /* The ABI specifies that the virtual table and associated
	 information are emitted with the key method, if any.  */
      tree method = CLASSTYPE_KEY_METHOD (ctype);
      /* If weak symbol support is not available, then we must be
	 careful not to emit the vtable when the key function is
	 inline.  An inline function can be defined in multiple
	 translation units.  If we were to emit the vtable in each
	 translation unit containing a definition, we would get
	 multiple definition errors at link-time.  */
      if (method && (flag_weak || ! DECL_DECLARED_INLINE_P (method)))
	import_export = (DECL_REALLY_EXTERN (method) ? -1 : 1);
    }

  /* When MULTIPLE_SYMBOL_SPACES is set, we cannot count on seeing a
     definition anywhere else.  */
  if (MULTIPLE_SYMBOL_SPACES && import_export == -1)
    import_export = 0;

  /* Allow back ends the chance to overrule the decision.  */
  if (targetm.cxx.import_export_class)
    import_export = targetm.cxx.import_export_class (ctype, import_export);

  if (import_export)
    {
      SET_CLASSTYPE_INTERFACE_KNOWN (ctype);
      CLASSTYPE_INTERFACE_ONLY (ctype) = (import_export < 0);
    }
}

/* Return true if VAR has already been provided to the back end; in that
   case VAR should not be modified further by the front end.  */
static bool
var_finalized_p (tree var)
{
  return varpool_node::get_create (var)->definition;
}

/* DECL is a VAR_DECL or FUNCTION_DECL which, for whatever reason,
   must be emitted in this translation unit.  Mark it as such.
*/ void mark_needed (tree decl) { TREE_USED (decl) = 1; if (TREE_CODE (decl) == FUNCTION_DECL) { /* Extern inline functions don't become needed when referenced. If we know a method will be emitted in other TU and no new functions can be marked reachable, just use the external definition. */ struct cgraph_node *node = cgraph_node::get_create (decl); node->forced_by_abi = true; /* #pragma interface and -frepo code can call mark_needed for maybe-in-charge 'tors; mark the clones as well. */ tree clone; FOR_EACH_CLONE (clone, decl) mark_needed (clone); } else if (VAR_P (decl)) { varpool_node *node = varpool_node::get_create (decl); /* C++ frontend use mark_decl_references to force COMDAT variables to be output that might appear dead otherwise. */ node->forced_by_abi = true; } } /* DECL is either a FUNCTION_DECL or a VAR_DECL. This function returns true if a definition of this entity should be provided in this object file. Callers use this function to determine whether or not to let the back end know that a definition of DECL is available in this translation unit. */ bool decl_needed_p (tree decl) { gcc_assert (VAR_OR_FUNCTION_DECL_P (decl)); /* This function should only be called at the end of the translation unit. We cannot be sure of whether or not something will be COMDAT until that point. */ gcc_assert (at_eof); /* All entities with external linkage that are not COMDAT/EXTERN should be emitted; they may be referred to from other object files. */ if (TREE_PUBLIC (decl) && !DECL_COMDAT (decl) && !DECL_REALLY_EXTERN (decl)) return true; /* Functions marked "dllexport" must be emitted so that they are visible to other DLLs. */ if (flag_keep_inline_dllexport && lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl))) return true; /* When not optimizing, do not bother to produce definitions for extern symbols. 
*/ if (DECL_REALLY_EXTERN (decl) && ((TREE_CODE (decl) != FUNCTION_DECL && !optimize) || (TREE_CODE (decl) == FUNCTION_DECL && !opt_for_fn (decl, optimize))) && !lookup_attribute ("always_inline", decl)) return false; /* If this entity was used, let the back end see it; it will decide whether or not to emit it into the object file. */ if (TREE_USED (decl)) return true; /* Virtual functions might be needed for devirtualization. */ if (flag_devirtualize && TREE_CODE (decl) == FUNCTION_DECL && DECL_VIRTUAL_P (decl)) return true; /* Otherwise, DECL does not need to be emitted -- yet. A subsequent reference to DECL might cause it to be emitted later. */ return false; } /* If necessary, write out the vtables for the dynamic class CTYPE. Returns true if any vtables were emitted. */ static bool maybe_emit_vtables (tree ctype) { tree vtbl; tree primary_vtbl; int needed = 0; varpool_node *current = NULL, *last = NULL; /* If the vtables for this class have already been emitted there is nothing more to do. */ primary_vtbl = CLASSTYPE_VTABLES (ctype); if (var_finalized_p (primary_vtbl)) return false; /* Ignore dummy vtables made by get_vtable_decl. */ if (TREE_TYPE (primary_vtbl) == void_type_node) return false; /* On some targets, we cannot determine the key method until the end of the translation unit -- which is when this function is called. */ if (!targetm.cxx.key_method_may_be_inline ()) determine_key_method (ctype); /* See if any of the vtables are needed. */ for (vtbl = CLASSTYPE_VTABLES (ctype); vtbl; vtbl = DECL_CHAIN (vtbl)) { import_export_decl (vtbl); if (DECL_NOT_REALLY_EXTERN (vtbl) && decl_needed_p (vtbl)) needed = 1; } if (!needed) { /* If the references to this class' vtables are optimized away, still emit the appropriate debugging information. See dfs_debug_mark. */ if (DECL_COMDAT (primary_vtbl) && CLASSTYPE_DEBUG_REQUESTED (ctype)) note_debug_info_needed (ctype); return false; } /* The ABI requires that we emit all of the vtables if we emit any of them. 
*/ for (vtbl = CLASSTYPE_VTABLES (ctype); vtbl; vtbl = DECL_CHAIN (vtbl)) { /* Mark entities references from the virtual table as used. */ mark_vtable_entries (vtbl); if (TREE_TYPE (DECL_INITIAL (vtbl)) == 0) { vec<tree, va_gc> *cleanups = NULL; tree expr = store_init_value (vtbl, DECL_INITIAL (vtbl), &cleanups, LOOKUP_NORMAL); /* It had better be all done at compile-time. */ gcc_assert (!expr && !cleanups); } /* Write it out. */ DECL_EXTERNAL (vtbl) = 0; rest_of_decl_compilation (vtbl, 1, 1); /* Because we're only doing syntax-checking, we'll never end up actually marking the variable as written. */ if (flag_syntax_only) TREE_ASM_WRITTEN (vtbl) = 1; else if (DECL_ONE_ONLY (vtbl)) { current = varpool_node::get_create (vtbl); if (last) current->add_to_same_comdat_group (last); last = current; } } /* Since we're writing out the vtable here, also write the debug info. */ note_debug_info_needed (ctype); return true; } /* A special return value from type_visibility meaning internal linkage. */ enum { VISIBILITY_ANON = VISIBILITY_INTERNAL+1 }; /* walk_tree helper function for type_visibility. */ static tree min_vis_r (tree *tp, int *walk_subtrees, void *data) { int *vis_p = (int *)data; if (! TYPE_P (*tp)) { *walk_subtrees = 0; } else if (OVERLOAD_TYPE_P (*tp) && !TREE_PUBLIC (TYPE_MAIN_DECL (*tp))) { *vis_p = VISIBILITY_ANON; return *tp; } else if (CLASS_TYPE_P (*tp) && CLASSTYPE_VISIBILITY (*tp) > *vis_p) *vis_p = CLASSTYPE_VISIBILITY (*tp); return NULL; } /* Returns the visibility of TYPE, which is the minimum visibility of its component types. */ static int type_visibility (tree type) { int vis = VISIBILITY_DEFAULT; cp_walk_tree_without_duplicates (&type, min_vis_r, &vis); return vis; } /* Limit the visibility of DECL to VISIBILITY, if not explicitly specified (or if VISIBILITY is static). If TMPL is true, this constraint is for a template argument, and takes precedence over explicitly-specified visibility on the template. 
*/

static void
constrain_visibility (tree decl, int visibility, bool tmpl)
{
  if (visibility == VISIBILITY_ANON)
    {
      /* extern "C" declarations aren't affected by the anonymous
	 namespace.  */
      if (!DECL_EXTERN_C_P (decl))
	{
	  /* Give DECL internal linkage: strip every property that
	     implies it could be referenced from another TU.  */
	  TREE_PUBLIC (decl) = 0;
	  DECL_WEAK (decl) = 0;
	  DECL_COMMON (decl) = 0;
	  DECL_COMDAT (decl) = false;
	  if (VAR_OR_FUNCTION_DECL_P (decl))
	    {
	      struct symtab_node *snode = symtab_node::get (decl);

	      if (snode)
		snode->set_comdat_group (NULL);
	    }
	  DECL_INTERFACE_KNOWN (decl) = 1;
	  if (DECL_LANG_SPECIFIC (decl))
	    DECL_NOT_REALLY_EXTERN (decl) = 1;
	}
    }
  else if (visibility > DECL_VISIBILITY (decl)
	   && (tmpl || !DECL_VISIBILITY_SPECIFIED (decl)))
    {
      /* Only ever narrow visibility (larger enum value = more
	 restricted), and never override an explicit attribute unless
	 the constraint comes from a template argument.  */
      DECL_VISIBILITY (decl) = (enum symbol_visibility) visibility;
      /* This visibility was not specified.  */
      DECL_VISIBILITY_SPECIFIED (decl) = false;
    }
}

/* Constrain the visibility of DECL based on the visibility of its template
   arguments.  */

static void
constrain_visibility_for_template (tree decl, tree targs)
{
  /* If this is a template instantiation, check the innermost
     template args for visibility constraints.  The outer template
     args are covered by the class check.  */
  tree args = INNERMOST_TEMPLATE_ARGS (targs);
  int i;
  for (i = TREE_VEC_LENGTH (args); i > 0; --i)
    {
      int vis = 0;

      tree arg = TREE_VEC_ELT (args, i-1);
      if (TYPE_P (arg))
	vis = type_visibility (arg);
      else
	{
	  /* A non-type argument: look through references and
	     address-of to find an underlying entity whose linkage or
	     visibility should constrain DECL.  */
	  if (REFERENCE_REF_P (arg))
	    arg = TREE_OPERAND (arg, 0);
	  if (TREE_TYPE (arg))
	    STRIP_NOPS (arg);
	  if (TREE_CODE (arg) == ADDR_EXPR)
	    arg = TREE_OPERAND (arg, 0);
	  if (VAR_OR_FUNCTION_DECL_P (arg))
	    {
	      if (! TREE_PUBLIC (arg))
		vis = VISIBILITY_ANON;
	      else
		vis = DECL_VISIBILITY (arg);
	    }
	}
      if (vis)
	constrain_visibility (decl, vis, true);
    }
}

/* Like c_determine_visibility, but with additional C++-specific
   behavior.

   Function-scope entities can rely on the function's visibility because
   it is set in start_preparsed_function.

   Class-scope entities cannot rely on the class's visibility until the end
   of the enclosing class definition.
Note that because namespaces have multiple independent definitions,
   namespace visibility is handled elsewhere using the #pragma visibility
   machinery rather than by decorating the namespace declaration.

   The goal is for constraints from the type to give a diagnostic, and
   other constraints to be applied silently.  */

void
determine_visibility (tree decl)
{
  /* Remember that all decls get VISIBILITY_DEFAULT when built.  */

  /* Only relevant for names with external linkage.  */
  if (!TREE_PUBLIC (decl))
    return;

  /* Cloned constructors and destructors get the same visibility as
     the underlying function.  That should be set up in
     maybe_clone_body.  */
  gcc_assert (!DECL_CLONED_FUNCTION_P (decl));

  /* Snapshot the initial state so we can tell at the end whether
     anything changed and DECL_RTL needs refreshing.  */
  bool orig_visibility_specified = DECL_VISIBILITY_SPECIFIED (decl);
  enum symbol_visibility orig_visibility = DECL_VISIBILITY (decl);

  /* The decl may be a template instantiation, which could influence
     visibilty.  */
  tree template_decl = NULL_TREE;
  if (TREE_CODE (decl) == TYPE_DECL)
    {
      if (CLASS_TYPE_P (TREE_TYPE (decl)))
	{
	  if (CLASSTYPE_USE_TEMPLATE (TREE_TYPE (decl)))
	    template_decl = decl;
	}
      else if (TYPE_TEMPLATE_INFO (TREE_TYPE (decl)))
	template_decl = decl;
    }
  else if (DECL_LANG_SPECIFIC (decl) && DECL_USE_TEMPLATE (decl))
    template_decl = decl;

  /* If DECL is a member of a class, visibility specifiers on the
     class can influence the visibility of the DECL.  */
  tree class_type = NULL_TREE;
  if (DECL_CLASS_SCOPE_P (decl))
    class_type = DECL_CONTEXT (decl);
  else
    {
      /* Not a class member.  */

      /* Virtual tables have DECL_CONTEXT set to their associated class,
	 so they are automatically handled above.  */
      gcc_assert (!VAR_P (decl)
		  || !DECL_VTABLE_OR_VTT_P (decl));

      if (DECL_FUNCTION_SCOPE_P (decl) && ! DECL_VISIBILITY_SPECIFIED (decl))
	{
	  /* Local statics and classes get the visibility of their
	     containing function by default, except that
	     -fvisibility-inlines-hidden doesn't affect them.  */
	  tree fn = DECL_CONTEXT (decl);
	  if (DECL_VISIBILITY_SPECIFIED (fn))
	    {
	      DECL_VISIBILITY (decl) = DECL_VISIBILITY (fn);
	      DECL_VISIBILITY_SPECIFIED (decl)
		= DECL_VISIBILITY_SPECIFIED (fn);
	    }
	  else
	    {
	      if (DECL_CLASS_SCOPE_P (fn))
		determine_visibility_from_class (decl, DECL_CONTEXT (fn));
	      else if (determine_hidden_inline (fn))
		{
		  /* The function is only hidden because of
		     -fvisibility-inlines-hidden, which shouldn't leak
		     onto the local entity; fall back to the ambient
		     default.  */
		  DECL_VISIBILITY (decl) = default_visibility;
		  DECL_VISIBILITY_SPECIFIED (decl)
		    = visibility_options.inpragma;
		}
	      else
		{
		  DECL_VISIBILITY (decl) = DECL_VISIBILITY (fn);
		  DECL_VISIBILITY_SPECIFIED (decl)
		    = DECL_VISIBILITY_SPECIFIED (fn);
		}
	    }

	  /* Local classes in templates have CLASSTYPE_USE_TEMPLATE set,
	     but have no TEMPLATE_INFO, so don't try to check it.  */
	  template_decl = NULL_TREE;
	}
      else if (VAR_P (decl) && DECL_TINFO_P (decl)
	       && flag_visibility_ms_compat)
	{
	  /* Under -fvisibility-ms-compat, types are visible by default,
	     even though their contents aren't.  */
	  tree underlying_type = TREE_TYPE (DECL_NAME (decl));
	  int underlying_vis = type_visibility (underlying_type);
	  if (underlying_vis == VISIBILITY_ANON
	      || (CLASS_TYPE_P (underlying_type)
		  && CLASSTYPE_VISIBILITY_SPECIFIED (underlying_type)))
	    constrain_visibility (decl, underlying_vis, false);
	  else
	    DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
	}
      else if (VAR_P (decl) && DECL_TINFO_P (decl))
	{
	  /* tinfo visibility is based on the type it's for.  */
	  constrain_visibility
	    (decl, type_visibility (TREE_TYPE (DECL_NAME (decl))), false);

	  /* Give the target a chance to override the visibility associated
	     with DECL.  */
	  if (TREE_PUBLIC (decl)
	      && !DECL_REALLY_EXTERN (decl)
	      && CLASS_TYPE_P (TREE_TYPE (DECL_NAME (decl)))
	      && !CLASSTYPE_VISIBILITY_SPECIFIED (TREE_TYPE (DECL_NAME (decl))))
	    targetm.cxx.determine_class_data_visibility (decl);
	}
      else if (template_decl)
	/* Template instantiations and specializations get visibility based
	   on their template unless they override it with an attribute.  */;
      else if (! DECL_VISIBILITY_SPECIFIED (decl))
	{
	  if (determine_hidden_inline (decl))
	    DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
	  else
	    {
	      /* Set default visibility to whatever the user supplied with
		 #pragma GCC visibility or a namespace visibility attribute.  */
	      DECL_VISIBILITY (decl) = default_visibility;
	      DECL_VISIBILITY_SPECIFIED (decl) = visibility_options.inpragma;
	    }
	}
    }

  if (template_decl)
    {
      /* If the specialization doesn't specify visibility, use the
	 visibility from the template.  */
      tree tinfo = get_template_info (template_decl);
      tree args = TI_ARGS (tinfo);
      tree attribs = (TREE_CODE (decl) == TYPE_DECL
		      ? TYPE_ATTRIBUTES (TREE_TYPE (decl))
		      : DECL_ATTRIBUTES (decl));

      if (args != error_mark_node)
	{
	  tree pattern = DECL_TEMPLATE_RESULT (TI_TEMPLATE (tinfo));

	  if (!DECL_VISIBILITY_SPECIFIED (decl))
	    {
	      if (!DECL_VISIBILITY_SPECIFIED (pattern)
		  && determine_hidden_inline (decl))
		DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
	      else
		{
	          DECL_VISIBILITY (decl) = DECL_VISIBILITY (pattern);
	          DECL_VISIBILITY_SPECIFIED (decl)
		    = DECL_VISIBILITY_SPECIFIED (pattern);
		}
	    }

	  if (args
	      /* Template argument visibility outweighs #pragma or namespace
		 visibility, but not an explicit attribute.  */
	      && !lookup_attribute ("visibility", attribs))
	    {
	      int depth = TMPL_ARGS_DEPTH (args);
	      if (DECL_VISIBILITY_SPECIFIED (decl))
		{
		  /* A class template member with explicit visibility
		     overrides the class visibility, so we need to apply
		     all the levels of template args directly.  */
		  int i;
		  for (i = 1; i <= depth; ++i)
		    {
		      tree lev = TMPL_ARGS_LEVEL (args, i);
		      constrain_visibility_for_template (decl, lev);
		    }
		}
	      else if (PRIMARY_TEMPLATE_P (TI_TEMPLATE (tinfo)))
		/* Limit visibility based on its template arguments.  */
		constrain_visibility_for_template (decl, args);
	    }
	}
    }

  if (class_type)
    determine_visibility_from_class (decl, class_type);

  if (decl_anon_ns_mem_p (decl))
    /* Names in an anonymous namespace get internal linkage.
       This might change once we implement export.  */
    constrain_visibility (decl, VISIBILITY_ANON, false);
  else if (TREE_CODE (decl) != TYPE_DECL)
    {
      /* Propagate anonymity from type to decl.  */
      int tvis = type_visibility (TREE_TYPE (decl));
      if (tvis == VISIBILITY_ANON
	  || ! DECL_VISIBILITY_SPECIFIED (decl))
	constrain_visibility (decl, tvis, false);
    }
  else if (no_linkage_check (TREE_TYPE (decl), /*relaxed_p=*/true))
    /* DR 757: A type without linkage shall not be used as the type of a
       variable or function with linkage, unless
       o the variable or function has extern "C" linkage (7.5 [dcl.link]), or
       o the variable or function is not used (3.2 [basic.def.odr]) or is
       defined in the same translation unit.

       Since non-extern "C" decls need to be defined in the same
       translation unit, we can make the type internal.  */
    constrain_visibility (decl, VISIBILITY_ANON, false);

  /* If visibility changed and DECL already has DECL_RTL, ensure
     symbol flags are updated.  */
  if ((DECL_VISIBILITY (decl) != orig_visibility
       || DECL_VISIBILITY_SPECIFIED (decl) != orig_visibility_specified)
      && ((VAR_P (decl) && TREE_STATIC (decl))
	  || TREE_CODE (decl) == FUNCTION_DECL)
      && DECL_RTL_SET_P (decl))
    make_decl_rtl (decl);
}

/* By default, static data members and function members receive
   the visibility of their containing class.  */

static void
determine_visibility_from_class (tree decl, tree class_type)
{
  if (DECL_VISIBILITY_SPECIFIED (decl))
    return;

  if (determine_hidden_inline (decl))
    DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
  else
    {
      /* Default to the class visibility.  */
      DECL_VISIBILITY (decl) = CLASSTYPE_VISIBILITY (class_type);
      DECL_VISIBILITY_SPECIFIED (decl)
	= CLASSTYPE_VISIBILITY_SPECIFIED (class_type);
    }

  /* Give the target a chance to override the visibility associated
     with DECL.  */
  if (VAR_P (decl)
      && (DECL_TINFO_P (decl)
	  || (DECL_VTABLE_OR_VTT_P (decl)
	      /* Construction virtual tables are not exported because
		 they cannot be referred to from other object files;
		 their name is not standardized by the ABI.  */
	      && !DECL_CONSTRUCTION_VTABLE_P (decl)))
      && TREE_PUBLIC (decl)
      && !DECL_REALLY_EXTERN (decl)
      && !CLASSTYPE_VISIBILITY_SPECIFIED (class_type))
    targetm.cxx.determine_class_data_visibility (decl);
}

/* Returns true iff DECL is an inline that should get hidden visibility
   because of -fvisibility-inlines-hidden.  */

static bool
determine_hidden_inline (tree decl)
{
  return (visibility_options.inlines_hidden
	  /* Don't do this for inline templates; specializations might not be
	     inline, and we don't want them to inherit the hidden
	     visibility.  We'll set it here for all inline instantiations.  */
	  && !processing_template_decl
	  && TREE_CODE (decl) == FUNCTION_DECL
	  && DECL_DECLARED_INLINE_P (decl)
	  && (! DECL_LANG_SPECIFIC (decl)
	      || ! DECL_EXPLICIT_INSTANTIATION (decl)));
}

/* Constrain the visibility of a class TYPE based on the visibility of its
   field types.  Warn if any fields require lesser visibility.  */

void
constrain_class_visibility (tree type)
{
  tree binfo;
  tree t;
  int i;

  int vis = type_visibility (type);

  if (vis == VISIBILITY_ANON
      || DECL_IN_SYSTEM_HEADER (TYPE_MAIN_DECL (type)))
    return;

  /* Don't warn about visibility if the class has explicit visibility.  */
  if (CLASSTYPE_VISIBILITY_SPECIFIED (type))
    vis = VISIBILITY_INTERNAL;

  for (t = TYPE_FIELDS (type); t; t = DECL_CHAIN (t))
    if (TREE_CODE (t) == FIELD_DECL && TREE_TYPE (t) != error_mark_node
	&& !DECL_ARTIFICIAL (t))
      {
	tree ftype = strip_pointer_or_array_types (TREE_TYPE (t));
	int subvis = type_visibility (ftype);

	if (subvis == VISIBILITY_ANON)
	  {
	    if (!in_main_input_context())
	      {
		tree nlt = no_linkage_check (ftype, /*relaxed_p=*/false);
		if (nlt)
		  {
		    if (same_type_p (TREE_TYPE (t), nlt))
		      warning (OPT_Wsubobject_linkage, "\
%qT has a field %qD whose type has no linkage",
			       type, t);
		    else
		      warning (OPT_Wsubobject_linkage, "\
%qT has a field %qD whose type depends on the type %qT which has no linkage",
			       type, t, nlt);
		  }
		else
		  warning (OPT_Wsubobject_linkage, "\
%qT has a field %qD whose type uses the anonymous namespace",
			   type, t);
	      }
	  }
	else if (MAYBE_CLASS_TYPE_P (ftype)
		 && vis < VISIBILITY_HIDDEN
		 && subvis >= VISIBILITY_HIDDEN)
	  warning (OPT_Wattributes, "\
%qT declared with greater visibility than the type of its field %qD",
		   type, t);
      }

  binfo = TYPE_BINFO (type);
  for (i = 0; BINFO_BASE_ITERATE (binfo, i, t); ++i)
    {
      int subvis = type_visibility (TREE_TYPE (t));

      if (subvis == VISIBILITY_ANON)
	{
	  if (!in_main_input_context())
	    {
	      tree nlt = no_linkage_check (TREE_TYPE (t), /*relaxed_p=*/false);
	      if (nlt)
		{
		  if (same_type_p (TREE_TYPE (t), nlt))
		    warning (OPT_Wsubobject_linkage, "\
%qT has a base %qT whose type has no linkage",
			     type, TREE_TYPE (t));
		  else
		    warning (OPT_Wsubobject_linkage, "\
%qT has a base %qT whose type depends on the type %qT which has no linkage",
			     type, TREE_TYPE (t), nlt);
		}
	      else
		warning (OPT_Wsubobject_linkage, "\
%qT has a base %qT whose type uses the anonymous namespace",
			 type, TREE_TYPE (t));
	    }
	}
      else if (vis < VISIBILITY_HIDDEN
	       && subvis >= VISIBILITY_HIDDEN)
	warning (OPT_Wattributes, "\
%qT declared with greater visibility than its base %qT",
		 type, TREE_TYPE (t));
    }
}

/* Functions for adjusting the visibility of a tagged type and its nested
   types and
declarations when it gets a name for linkage purposes from a
   typedef.  */

static void bt_reset_linkage_1 (binding_entry, void *);
static void bt_reset_linkage_2 (binding_entry, void *);

/* First reset the visibility of all the types.  */

static void
reset_type_linkage_1 (tree type)
{
  set_linkage_according_to_type (type, TYPE_MAIN_DECL (type));
  if (CLASS_TYPE_P (type))
    binding_table_foreach (CLASSTYPE_NESTED_UTDS (type),
			   bt_reset_linkage_1, NULL);
}

/* binding_table_foreach adaptor for reset_type_linkage_1.  */

static void
bt_reset_linkage_1 (binding_entry b, void */*data*/)
{
  reset_type_linkage_1 (b->type);
}

/* Then reset the visibility of any static data members or member
   functions that use those types.  */

static void
reset_decl_linkage (tree decl)
{
  if (TREE_PUBLIC (decl))
    return;
  if (DECL_CLONED_FUNCTION_P (decl))
    return;
  TREE_PUBLIC (decl) = true;
  DECL_INTERFACE_KNOWN (decl) = false;
  determine_visibility (decl);
  tentative_decl_linkage (decl);
}

/* Recompute the mangled names and linkage of TYPE's vtables, typeinfo,
   and member declarations, then recurse into nested types.  */

static void
reset_type_linkage_2 (tree type)
{
  if (CLASS_TYPE_P (type))
    {
      if (tree vt = CLASSTYPE_VTABLES (type))
	{
	  tree name = mangle_vtbl_for_type (type);
	  DECL_NAME (vt) = name;
	  SET_DECL_ASSEMBLER_NAME (vt, name);
	  reset_decl_linkage (vt);
	}
      if (tree ti = CLASSTYPE_TYPEINFO_VAR (type))
	{
	  tree name = mangle_typeinfo_for_type (type);
	  DECL_NAME (ti) = name;
	  SET_DECL_ASSEMBLER_NAME (ti, name);
	  TREE_TYPE (name) = type;
	  reset_decl_linkage (ti);
	}
      for (tree m = TYPE_FIELDS (type); m; m = DECL_CHAIN (m))
	{
	  tree mem = STRIP_TEMPLATE (m);
	  if (TREE_CODE (mem) == VAR_DECL || TREE_CODE (mem) == FUNCTION_DECL)
	    reset_decl_linkage (mem);
	}
      binding_table_foreach (CLASSTYPE_NESTED_UTDS (type),
			     bt_reset_linkage_2, NULL);
    }
}

/* binding_table_foreach adaptor for reset_type_linkage_2.  */

static void
bt_reset_linkage_2 (binding_entry b, void */*data*/)
{
  reset_type_linkage_2 (b->type);
}

/* Entry point: two passes so all type visibilities are settled before
   any dependent declarations are re-examined.  */

void
reset_type_linkage (tree type)
{
  reset_type_linkage_1 (type);
  reset_type_linkage_2 (type);
}

/* Set up our initial idea of what the linkage of DECL should be.
*/

void
tentative_decl_linkage (tree decl)
{
  if (DECL_INTERFACE_KNOWN (decl))
    /* We've already made a decision as to how this function will
       be handled.  */;
  else if (vague_linkage_p (decl))
    {
      if (TREE_CODE (decl) == FUNCTION_DECL
	  && decl_defined_p (decl))
	{
	  DECL_EXTERNAL (decl) = 1;
	  DECL_NOT_REALLY_EXTERN (decl) = 1;
	  note_vague_linkage_fn (decl);
	  /* A non-template inline function with external linkage will
	     always be COMDAT.  As we must eventually determine the
	     linkage of all functions, and as that causes writes to
	     the data mapped in from the PCH file, it's advantageous
	     to mark the functions at this point.  */
	  if (DECL_DECLARED_INLINE_P (decl)
	      && (!DECL_IMPLICIT_INSTANTIATION (decl)
		  || DECL_DEFAULTED_FN (decl)))
	    {
	      /* This function must have external linkage, as
		 otherwise DECL_INTERFACE_KNOWN would have been
		 set.  */
	      gcc_assert (TREE_PUBLIC (decl));
	      comdat_linkage (decl);
	      DECL_INTERFACE_KNOWN (decl) = 1;
	    }
	}
      else if (VAR_P (decl))
	maybe_commonize_var (decl);
    }
}

/* DECL is a FUNCTION_DECL or VAR_DECL.  If the object file linkage
   for DECL has not already been determined, do so now by setting
   DECL_EXTERNAL, DECL_COMDAT and other related flags.  Until this
   function is called entities with vague linkage whose definitions
   are available must have TREE_PUBLIC set.

   If this function decides to place DECL in COMDAT, it will set
   appropriate flags -- but will not clear DECL_EXTERNAL.  It is up to
   the caller to decide whether or not to clear DECL_EXTERNAL.  Some
   callers defer that decision until it is clear that DECL is actually
   required.  */

void
import_export_decl (tree decl)
{
  int emit_p;
  bool comdat_p;
  bool import_p;
  tree class_type = NULL_TREE;

  if (DECL_INTERFACE_KNOWN (decl))
    return;

  /* We cannot determine what linkage to give to an entity with vague
     linkage until the end of the file.  For example, a virtual table
     for a class will be defined if and only if the key method is
     defined in this translation unit.  As a further example, consider
     that when compiling a translation unit that uses PCH file with
     "-frepo" it would be incorrect to make decisions about what
     entities to emit when building the PCH; those decisions must be
     delayed until the repository information has been processed.  */
  gcc_assert (at_eof);
  /* Object file linkage for explicit instantiations is handled in
     mark_decl_instantiated.  For static variables in functions with
     vague linkage, maybe_commonize_var is used.

     Therefore, the only declarations that should be provided to this
     function are those with external linkage that are:

     * implicit instantiations of function templates

     * inline function

     * implicit instantiations of static data members of class
       templates

     * virtual tables

     * typeinfo objects

     Furthermore, all entities that reach this point must have a
     definition available in this translation unit.

     The following assertions check these conditions.  */
  gcc_assert (VAR_OR_FUNCTION_DECL_P (decl));
  /* Any code that creates entities with TREE_PUBLIC cleared should
     also set DECL_INTERFACE_KNOWN.  */
  gcc_assert (TREE_PUBLIC (decl));
  if (TREE_CODE (decl) == FUNCTION_DECL)
    gcc_assert (DECL_IMPLICIT_INSTANTIATION (decl)
		|| DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (decl)
		|| DECL_DECLARED_INLINE_P (decl));
  else
    gcc_assert (DECL_IMPLICIT_INSTANTIATION (decl)
		|| DECL_VTABLE_OR_VTT_P (decl)
		|| DECL_TINFO_P (decl));
  /* Check that a definition of DECL is available in this translation
     unit.  */
  gcc_assert (!DECL_REALLY_EXTERN (decl));

  /* Assume that DECL will not have COMDAT linkage.  */
  comdat_p = false;
  /* Assume that DECL will not be imported into this translation
     unit.  */
  import_p = false;

  /* See if the repository tells us whether or not to emit DECL in
     this translation unit.  */
  emit_p = repo_emit_p (decl);
  if (emit_p == 0)
    import_p = true;
  else if (emit_p == 1)
    {
      /* The repository indicates that this entity should be defined
	 here.  Make sure the back end honors that request.  */
      mark_needed (decl);
      /* Output the definition as an ordinary strong definition.  */
      DECL_EXTERNAL (decl) = 0;
      DECL_INTERFACE_KNOWN (decl) = 1;
      return;
    }

  if (import_p)
    /* We have already decided what to do with this DECL; there is no
       need to check anything further.  */
    ;
  else if (VAR_P (decl) && DECL_VTABLE_OR_VTT_P (decl))
    {
      class_type = DECL_CONTEXT (decl);
      import_export_class (class_type);
      if (CLASSTYPE_INTERFACE_KNOWN (class_type)
	  && CLASSTYPE_INTERFACE_ONLY (class_type))
	import_p = true;
      else if ((!flag_weak || TARGET_WEAK_NOT_IN_ARCHIVE_TOC)
	       && !CLASSTYPE_USE_TEMPLATE (class_type)
	       && CLASSTYPE_KEY_METHOD (class_type)
	       && !DECL_DECLARED_INLINE_P (CLASSTYPE_KEY_METHOD (class_type)))
	/* The ABI requires that all virtual tables be emitted with
	   COMDAT linkage.  However, on systems where COMDAT symbols
	   don't show up in the table of contents for a static
	   archive, or on systems without weak symbols (where we
	   approximate COMDAT linkage by using internal linkage), the
	   linker will report errors about undefined symbols because
	   it will not see the virtual table definition.  Therefore,
	   in the case that we know that the virtual table will be
	   emitted in only one translation unit, we make the virtual
	   table an ordinary definition with external linkage.  */
	DECL_EXTERNAL (decl) = 0;
      else if (CLASSTYPE_INTERFACE_KNOWN (class_type))
	{
	  /* CLASS_TYPE is being exported from this translation unit,
	     so DECL should be defined here.  */
	  if (!flag_weak && CLASSTYPE_EXPLICIT_INSTANTIATION (class_type))
	    /* If a class is declared in a header with the "extern
	       template" extension, then it will not be instantiated,
	       even in translation units that would normally require
	       it.  Often such classes are explicitly instantiated in
	       one translation unit.  Therefore, the explicit
	       instantiation must be made visible to other translation
	       units.  */
	    DECL_EXTERNAL (decl) = 0;
	  else
	    {
	      /* The generic C++ ABI says that class data is always
		 COMDAT, even if there is a key function.  Some
		 variants (e.g., the ARM EABI) says that class data
		 only has COMDAT linkage if the class data might be
		 emitted in more than one translation unit.  When the
		 key method can be inline and is inline, we still have
		 to arrange for comdat even though
		 class_data_always_comdat is false.  */
	      if (!CLASSTYPE_KEY_METHOD (class_type)
		  || DECL_DECLARED_INLINE_P (CLASSTYPE_KEY_METHOD (class_type))
		  || targetm.cxx.class_data_always_comdat ())
		{
		  /* The ABI requires COMDAT linkage.  Normally, we
		     only emit COMDAT things when they are needed;
		     make sure that we realize that this entity is
		     indeed needed.  */
		  comdat_p = true;
		  mark_needed (decl);
		}
	    }
	}
      else if (!flag_implicit_templates
	       && CLASSTYPE_IMPLICIT_INSTANTIATION (class_type))
	import_p = true;
      else
	comdat_p = true;
    }
  else if (VAR_P (decl) && DECL_TINFO_P (decl))
    {
      tree type = TREE_TYPE (DECL_NAME (decl));
      if (CLASS_TYPE_P (type))
	{
	  class_type = type;
	  import_export_class (type);
	  if (CLASSTYPE_INTERFACE_KNOWN (type)
	      && TYPE_POLYMORPHIC_P (type)
	      && CLASSTYPE_INTERFACE_ONLY (type)
	      /* If -fno-rtti was specified, then we cannot be sure
		 that RTTI information will be emitted with the
		 virtual table of the class, so we must emit it
		 wherever it is used.  */
	      && flag_rtti)
	    import_p = true;
	  else
	    {
	      if (CLASSTYPE_INTERFACE_KNOWN (type)
		  && !CLASSTYPE_INTERFACE_ONLY (type))
		{
		  comdat_p = (targetm.cxx.class_data_always_comdat ()
			      || (CLASSTYPE_KEY_METHOD (type)
				  && DECL_DECLARED_INLINE_P
				       (CLASSTYPE_KEY_METHOD (type))));
		  mark_needed (decl);
		  if (!flag_weak)
		    {
		      comdat_p = false;
		      DECL_EXTERNAL (decl) = 0;
		    }
		}
	      else
		comdat_p = true;
	    }
	}
      else
	comdat_p = true;
    }
  else if (DECL_TEMPLOID_INSTANTIATION (decl))
    {
      /* DECL is an implicit instantiation of a function or static
	 data member.  */
      if ((flag_implicit_templates
	   && !flag_use_repository)
	  || (flag_implicit_inline_templates
	      && TREE_CODE (decl) == FUNCTION_DECL
	      && DECL_DECLARED_INLINE_P (decl)))
	comdat_p = true;
      else
	/* If we are not implicitly generating templates, then mark
	   this entity as undefined in this translation unit.  */
	import_p = true;
    }
  else if (DECL_FUNCTION_MEMBER_P (decl))
    {
      if (!DECL_DECLARED_INLINE_P (decl))
	{
	  tree ctype = DECL_CONTEXT (decl);
	  import_export_class (ctype);
	  if (CLASSTYPE_INTERFACE_KNOWN (ctype))
	    {
	      DECL_NOT_REALLY_EXTERN (decl)
		= ! (CLASSTYPE_INTERFACE_ONLY (ctype)
		     || (DECL_DECLARED_INLINE_P (decl)
			 && ! flag_implement_inlines
			 && !DECL_VINDEX (decl)));

	      if (!DECL_NOT_REALLY_EXTERN (decl))
		DECL_EXTERNAL (decl) = 1;

	      /* Always make artificials weak.  */
	      if (DECL_ARTIFICIAL (decl) && flag_weak)
		comdat_p = true;
	      else
		maybe_make_one_only (decl);
	    }
	}
      else
	comdat_p = true;
    }
  else
    comdat_p = true;

  if (import_p)
    {
      /* If we are importing DECL into this translation unit, mark it
	 as undefined here.  */
      DECL_EXTERNAL (decl) = 1;
      DECL_NOT_REALLY_EXTERN (decl) = 0;
    }
  else if (comdat_p)
    {
      /* If we decided to put DECL in COMDAT, mark it accordingly at
	 this point.  */
      comdat_linkage (decl);
    }

  DECL_INTERFACE_KNOWN (decl) = 1;
}

/* Return an expression that performs the destruction of DECL, which
   must be a VAR_DECL whose type has a non-trivial destructor, or is
   an array whose (innermost) elements have a non-trivial destructor.  */

tree
build_cleanup (tree decl)
{
  tree clean = cxx_maybe_build_cleanup (decl, tf_warning_or_error);
  gcc_assert (clean != NULL_TREE);
  return clean;
}

/* Returns the initialization guard variable for the variable DECL,
   which has static storage duration.  */

tree
get_guard (tree decl)
{
  tree sname;
  tree guard;

  sname = mangle_guard_variable (decl);
  guard = get_global_binding (sname);
  if (! guard)
    {
      tree guard_type;

      /* We use a type that is big enough to contain a mutex as well
	 as an integer counter.
*/ guard_type = targetm.cxx.guard_type (); guard = build_decl (DECL_SOURCE_LOCATION (decl), VAR_DECL, sname, guard_type); /* The guard should have the same linkage as what it guards. */ TREE_PUBLIC (guard) = TREE_PUBLIC (decl); TREE_STATIC (guard) = TREE_STATIC (decl); DECL_COMMON (guard) = DECL_COMMON (decl); DECL_COMDAT (guard) = DECL_COMDAT (decl); CP_DECL_THREAD_LOCAL_P (guard) = CP_DECL_THREAD_LOCAL_P (decl); set_decl_tls_model (guard, DECL_TLS_MODEL (decl)); if (DECL_ONE_ONLY (decl)) make_decl_one_only (guard, cxx_comdat_group (guard)); if (TREE_PUBLIC (decl)) DECL_WEAK (guard) = DECL_WEAK (decl); DECL_VISIBILITY (guard) = DECL_VISIBILITY (decl); DECL_VISIBILITY_SPECIFIED (guard) = DECL_VISIBILITY_SPECIFIED (decl); DECL_ARTIFICIAL (guard) = 1; DECL_IGNORED_P (guard) = 1; TREE_USED (guard) = 1; pushdecl_top_level_and_finish (guard, NULL_TREE); } return guard; } /* Return an atomic load of src with the appropriate memory model. */ static tree build_atomic_load_byte (tree src, HOST_WIDE_INT model) { tree ptr_type = build_pointer_type (char_type_node); tree mem_model = build_int_cst (integer_type_node, model); tree t, addr, val; unsigned int size; int fncode; size = tree_to_uhwi (TYPE_SIZE_UNIT (char_type_node)); fncode = BUILT_IN_ATOMIC_LOAD_N + exact_log2 (size) + 1; t = builtin_decl_implicit ((enum built_in_function) fncode); addr = build1 (ADDR_EXPR, ptr_type, src); val = build_call_expr (t, 2, addr, mem_model); return val; } /* Return those bits of the GUARD variable that should be set when the guarded entity is actually initialized. */ static tree get_guard_bits (tree guard) { if (!targetm.cxx.guard_mask_bit ()) { /* We only set the first byte of the guard, in order to leave room for a mutex in the high-order bits. 
 */
      guard = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (guard)),
		      guard);
      guard = build1 (NOP_EXPR, build_pointer_type (char_type_node), guard);
      guard = build1 (INDIRECT_REF, char_type_node, guard);
    }

  return guard;
}

/* Return an expression which determines whether or not the GUARD
   variable has already been initialized.  THREAD_SAFE selects between
   a plain read of the guard bits and an atomic acquire load of the
   guard's first byte.  */

tree
get_guard_cond (tree guard, bool thread_safe)
{
  tree guard_value;

  if (!thread_safe)
    /* No synchronization needed: just look at the guard bits.  */
    guard = get_guard_bits (guard);
  else
    /* Read the first byte of the guard with acquire semantics so that
       we observe the writes performed by the initializing thread.  */
    guard = build_atomic_load_byte (guard, MEMMODEL_ACQUIRE);

  /* Mask off all but the low bit.  */
  if (targetm.cxx.guard_mask_bit ())
    {
      guard_value = integer_one_node;
      if (!same_type_p (TREE_TYPE (guard_value), TREE_TYPE (guard)))
	guard_value = fold_convert (TREE_TYPE (guard), guard_value);
      guard = cp_build_binary_op (input_location,
				  BIT_AND_EXPR, guard, guard_value,
				  tf_warning_or_error);
    }

  /* The guarded entity still needs initialization iff the (possibly
     masked) guard value compares equal to zero.  */
  guard_value = integer_zero_node;
  if (!same_type_p (TREE_TYPE (guard_value), TREE_TYPE (guard)))
    guard_value = fold_convert (TREE_TYPE (guard), guard_value);
  return cp_build_binary_op (input_location,
			     EQ_EXPR, guard, guard_value,
			     tf_warning_or_error);
}

/* Return an expression which sets the GUARD variable, indicating that
   the variable being guarded has been initialized.  */

tree
set_guard (tree guard)
{
  tree guard_init;

  /* Set the GUARD to one.  */
  guard = get_guard_bits (guard);
  guard_init = integer_one_node;
  if (!same_type_p (TREE_TYPE (guard_init), TREE_TYPE (guard)))
    guard_init = fold_convert (TREE_TYPE (guard), guard_init);
  return cp_build_modify_expr (input_location, guard, NOP_EXPR, guard_init,
			       tf_warning_or_error);
}

/* Returns true iff we can tell that VAR does not have a dynamic
   initializer.  */

static bool
var_defined_without_dynamic_init (tree var)
{
  /* If it's defined in another TU, we can't tell.  */
  if (DECL_EXTERNAL (var))
    return false;
  /* If it has a non-trivial destructor, registering the destructor
     counts as dynamic initialization.
 */
  if (TYPE_HAS_NONTRIVIAL_DESTRUCTOR (TREE_TYPE (var)))
    return false;
  /* If it's in this TU, its initializer has been processed, unless
     it's a case of self-initialization, then DECL_INITIALIZED_P is
     false while the initializer is handled by finish_id_expression.  */
  if (!DECL_INITIALIZED_P (var))
    return false;
  /* If it has no initializer or a constant one, it's not dynamic.  */
  return (!DECL_NONTRIVIALLY_INITIALIZED_P (var)
	  || DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (var));
}

/* Returns true iff VAR is a variable that needs uses to be wrapped
   for possible dynamic initialization.  Only non-function-scope
   C++11 thread_local variables (not __thread/GNU TLS ones) that may
   need dynamic initialization qualify.  */

static bool
var_needs_tls_wrapper (tree var)
{
  return (!error_operand_p (var)
	  && CP_DECL_THREAD_LOCAL_P (var)
	  && !DECL_GNU_TLS_P (var)
	  && !DECL_FUNCTION_SCOPE_P (var)
	  && !var_defined_without_dynamic_init (var));
}

/* Get the FUNCTION_DECL for the shared TLS init function for this
   translation unit.  The function is created (with internal linkage
   and C language linkage) on first use and reused thereafter.  */

static tree
get_local_tls_init_fn (void)
{
  tree sname = get_identifier ("__tls_init");
  tree fn = get_global_binding (sname);
  if (!fn)
    {
      /* void __tls_init (void), local to this TU.  */
      fn = build_lang_decl (FUNCTION_DECL, sname,
			    build_function_type (void_type_node,
						 void_list_node));
      SET_DECL_LANGUAGE (fn, lang_c);
      TREE_PUBLIC (fn) = false;
      DECL_ARTIFICIAL (fn) = true;
      mark_used (fn);
      set_global_binding (fn);
    }
  return fn;
}

/* Get a FUNCTION_DECL for the init function for the thread_local
   variable VAR.  The init function will be an alias to the function
   that initializes all the non-local TLS variables in the translation
   unit.  The init function is only used by the wrapper function.  */

static tree
get_tls_init_fn (tree var)
{
  /* Only C++11 TLS vars need this init fn.  */
  if (!var_needs_tls_wrapper (var))
    return NULL_TREE;

  /* If -fno-extern-tls-init, assume that we don't need to call
     a tls init function for a variable defined in another TU.  */
  if (!flag_extern_tls_init && DECL_EXTERNAL (var))
    return NULL_TREE;

  /* If the variable is internal, or if we can't generate aliases,
     call the local init function directly.
*/ if (!TREE_PUBLIC (var) || !TARGET_SUPPORTS_ALIASES) return get_local_tls_init_fn (); tree sname = mangle_tls_init_fn (var); tree fn = get_global_binding (sname); if (!fn) { fn = build_lang_decl (FUNCTION_DECL, sname, build_function_type (void_type_node, void_list_node)); SET_DECL_LANGUAGE (fn, lang_c); TREE_PUBLIC (fn) = TREE_PUBLIC (var); DECL_ARTIFICIAL (fn) = true; DECL_COMDAT (fn) = DECL_COMDAT (var); DECL_EXTERNAL (fn) = DECL_EXTERNAL (var); if (DECL_ONE_ONLY (var)) make_decl_one_only (fn, cxx_comdat_group (fn)); if (TREE_PUBLIC (var)) { tree obtype = strip_array_types (non_reference (TREE_TYPE (var))); /* If the variable is defined somewhere else and might have static initialization, make the init function a weak reference. */ if ((!TYPE_NEEDS_CONSTRUCTING (obtype) || TYPE_HAS_CONSTEXPR_CTOR (obtype) || TYPE_HAS_TRIVIAL_DFLT (obtype)) && TYPE_HAS_TRIVIAL_DESTRUCTOR (obtype) && DECL_EXTERNAL (var)) declare_weak (fn); else DECL_WEAK (fn) = DECL_WEAK (var); } DECL_VISIBILITY (fn) = DECL_VISIBILITY (var); DECL_VISIBILITY_SPECIFIED (fn) = DECL_VISIBILITY_SPECIFIED (var); DECL_DLLIMPORT_P (fn) = DECL_DLLIMPORT_P (var); DECL_IGNORED_P (fn) = 1; mark_used (fn); DECL_BEFRIENDING_CLASSES (fn) = var; set_global_binding (fn); } return fn; } /* Get a FUNCTION_DECL for the init wrapper function for the thread_local variable VAR. The wrapper function calls the init function (if any) for VAR and then returns a reference to VAR. The wrapper function is used in place of VAR everywhere VAR is mentioned. */ tree get_tls_wrapper_fn (tree var) { /* Only C++11 TLS vars need this wrapper fn. */ if (!var_needs_tls_wrapper (var)) return NULL_TREE; tree sname = mangle_tls_wrapper_fn (var); tree fn = get_global_binding (sname); if (!fn) { /* A named rvalue reference is an lvalue, so the wrapper should always return an lvalue reference. 
*/ tree type = non_reference (TREE_TYPE (var)); type = build_reference_type (type); tree fntype = build_function_type (type, void_list_node); fn = build_lang_decl (FUNCTION_DECL, sname, fntype); SET_DECL_LANGUAGE (fn, lang_c); TREE_PUBLIC (fn) = TREE_PUBLIC (var); DECL_ARTIFICIAL (fn) = true; DECL_IGNORED_P (fn) = 1; /* The wrapper is inline and emitted everywhere var is used. */ DECL_DECLARED_INLINE_P (fn) = true; if (TREE_PUBLIC (var)) { comdat_linkage (fn); #ifdef HAVE_GAS_HIDDEN /* Make the wrapper bind locally; there's no reason to share the wrapper between multiple shared objects. */ DECL_VISIBILITY (fn) = VISIBILITY_INTERNAL; DECL_VISIBILITY_SPECIFIED (fn) = true; #endif } if (!TREE_PUBLIC (fn)) DECL_INTERFACE_KNOWN (fn) = true; mark_used (fn); note_vague_linkage_fn (fn); #if 0 /* We want CSE to commonize calls to the wrapper, but marking it as pure is unsafe since it has side-effects. I guess we need a new ECF flag even weaker than ECF_PURE. FIXME! */ DECL_PURE_P (fn) = true; #endif DECL_BEFRIENDING_CLASSES (fn) = var; set_global_binding (fn); } return fn; } /* At EOF, generate the definition for the TLS wrapper function FN: T& var_wrapper() { if (init_fn) init_fn(); return var; } */ static void generate_tls_wrapper (tree fn) { tree var = DECL_BEFRIENDING_CLASSES (fn); start_preparsed_function (fn, NULL_TREE, SF_DEFAULT | SF_PRE_PARSED); tree body = begin_function_body (); /* Only call the init fn if there might be one. */ if (tree init_fn = get_tls_init_fn (var)) { tree if_stmt = NULL_TREE; /* If init_fn is a weakref, make sure it exists before calling. 
*/ if (lookup_attribute ("weak", DECL_ATTRIBUTES (init_fn))) { if_stmt = begin_if_stmt (); tree addr = cp_build_addr_expr (init_fn, tf_warning_or_error); tree cond = cp_build_binary_op (DECL_SOURCE_LOCATION (var), NE_EXPR, addr, nullptr_node, tf_warning_or_error); finish_if_stmt_cond (cond, if_stmt); } finish_expr_stmt (build_cxx_call (init_fn, 0, NULL, tf_warning_or_error)); if (if_stmt) { finish_then_clause (if_stmt); finish_if_stmt (if_stmt); } } else /* If there's no initialization, the wrapper is a constant function. */ TREE_READONLY (fn) = true; finish_return_stmt (convert_from_reference (var)); finish_function_body (body); expand_or_defer_fn (finish_function (/*inline_p=*/false)); } /* Start the process of running a particular set of global constructors or destructors. Subroutine of do_[cd]tors. Also called from vtv_start_verification_constructor_init_function. */ static tree start_objects (int method_type, int initp) { tree body; tree fndecl; char type[14]; /* Make ctor or dtor function. METHOD_TYPE may be 'I' or 'D'. */ if (initp != DEFAULT_INIT_PRIORITY) { char joiner; #ifdef JOINER joiner = JOINER; #else joiner = '_'; #endif sprintf (type, "sub_%c%c%.5u", method_type, joiner, initp); } else sprintf (type, "sub_%c", method_type); fndecl = build_lang_decl (FUNCTION_DECL, get_file_function_name (type), build_function_type_list (void_type_node, NULL_TREE)); start_preparsed_function (fndecl, /*attrs=*/NULL_TREE, SF_PRE_PARSED); TREE_PUBLIC (current_function_decl) = 0; /* Mark as artificial because it's not explicitly in the user's source code. */ DECL_ARTIFICIAL (current_function_decl) = 1; /* Mark this declaration as used to avoid spurious warnings. */ TREE_USED (current_function_decl) = 1; /* Mark this function as a global constructor or destructor. 
 */
  if (method_type == 'I')
    DECL_GLOBAL_CTOR_P (current_function_decl) = 1;
  else
    DECL_GLOBAL_DTOR_P (current_function_decl) = 1;

  body = begin_compound_stmt (BCS_FN_BODY);

  return body;
}

/* Finish the process of running a particular set of global
   constructors or destructors.  Subroutine of do_[cd]tors.
   METHOD_TYPE is 'I' for constructors, anything else (i.e. 'D') for
   destructors; INITP is the priority to record for the function.  */

static void
finish_objects (int method_type, int initp, tree body)
{
  tree fn;

  /* Finish up.  */
  finish_compound_stmt (body);
  fn = finish_function (/*inline_p=*/false);

  /* Register the finished function with the back end as a static
     constructor or destructor at the requested priority.  */
  if (method_type == 'I')
    {
      DECL_STATIC_CONSTRUCTOR (fn) = 1;
      decl_init_priority_insert (fn, initp);
    }
  else
    {
      DECL_STATIC_DESTRUCTOR (fn) = 1;
      decl_fini_priority_insert (fn, initp);
    }

  expand_or_defer_fn (fn);
}

/* The names of the parameters to the function created to handle
   initializations and destructions for objects with static storage
   duration.  */
#define INITIALIZE_P_IDENTIFIER "__initialize_p"
#define PRIORITY_IDENTIFIER "__priority"

/* The name of the function we create to handle initializations and
   destructions for objects with static storage duration.  */
#define SSDF_IDENTIFIER "__static_initialization_and_destruction"

/* The declaration for the __INITIALIZE_P argument.  */
static GTY(()) tree initialize_p_decl;

/* The declaration for the __PRIORITY argument.  */
static GTY(()) tree priority_decl;

/* The declaration for the static storage duration function.  */
static GTY(()) tree ssdf_decl;

/* All the static storage duration functions created in this
   translation unit.  */
static GTY(()) vec<tree, va_gc> *ssdf_decls;

/* A map from priority levels to information about that priority
   level.  There may be many such levels, so efficient lookup is
   important.  */
static splay_tree priority_info_map;

/* Begins the generation of the function that will handle all
   initialization and destruction of objects with static storage
   duration.  The function generated takes two parameters of type
   `int': __INITIALIZE_P and __PRIORITY.  If __INITIALIZE_P is
   nonzero, it performs initializations.
Otherwise, it performs destructions. It only performs those initializations or destructions with the indicated __PRIORITY. The generated function returns no value. It is assumed that this function will only be called once per translation unit. */ static tree start_static_storage_duration_function (unsigned count) { tree type; tree body; char id[sizeof (SSDF_IDENTIFIER) + 1 /* '\0' */ + 32]; /* Create the identifier for this function. It will be of the form SSDF_IDENTIFIER_<number>. */ sprintf (id, "%s_%u", SSDF_IDENTIFIER, count); type = build_function_type_list (void_type_node, integer_type_node, integer_type_node, NULL_TREE); /* Create the FUNCTION_DECL itself. */ ssdf_decl = build_lang_decl (FUNCTION_DECL, get_identifier (id), type); TREE_PUBLIC (ssdf_decl) = 0; DECL_ARTIFICIAL (ssdf_decl) = 1; /* Put this function in the list of functions to be called from the static constructors and destructors. */ if (!ssdf_decls) { vec_alloc (ssdf_decls, 32); /* Take this opportunity to initialize the map from priority numbers to information about that priority level. */ priority_info_map = splay_tree_new (splay_tree_compare_ints, /*delete_key_fn=*/0, /*delete_value_fn=*/ (splay_tree_delete_value_fn) (void (*) (void)) free); /* We always need to generate functions for the DEFAULT_INIT_PRIORITY so enter it now. That way when we walk priorities later, we'll be sure to find the DEFAULT_INIT_PRIORITY. */ get_priority_info (DEFAULT_INIT_PRIORITY); } vec_safe_push (ssdf_decls, ssdf_decl); /* Create the argument list. */ initialize_p_decl = cp_build_parm_decl (ssdf_decl, get_identifier (INITIALIZE_P_IDENTIFIER), integer_type_node); TREE_USED (initialize_p_decl) = 1; priority_decl = cp_build_parm_decl (ssdf_decl, get_identifier (PRIORITY_IDENTIFIER), integer_type_node); TREE_USED (priority_decl) = 1; DECL_CHAIN (initialize_p_decl) = priority_decl; DECL_ARGUMENTS (ssdf_decl) = initialize_p_decl; /* Put the function in the global scope. 
 */
  pushdecl (ssdf_decl);

  /* Start the function itself.  This is equivalent to declaring the
     function as:

       static void __ssdf (int __initialize_p, int __priority_p);

     It is static because we only need to call this function from the
     various constructor and destructor functions for this module.  */
  start_preparsed_function (ssdf_decl,
			    /*attrs=*/NULL_TREE,
			    SF_PRE_PARSED);

  /* Set up the scope of the outermost block in the function.  */
  body = begin_compound_stmt (BCS_FN_BODY);

  return body;
}

/* Finish the generation of the function which performs initialization
   and destruction of objects with static storage duration.  After
   this point, no more such objects can be created.  */

static void
finish_static_storage_duration_function (tree body)
{
  /* Close out the function.  */
  finish_compound_stmt (body);
  expand_or_defer_fn (finish_function (/*inline_p=*/false));
}

/* Return the information about the indicated PRIORITY level.  If no
   code to handle this level has yet been generated, generate the
   appropriate prologue.  */

static priority_info
get_priority_info (int priority)
{
  priority_info pi;
  splay_tree_node n;

  n = splay_tree_lookup (priority_info_map,
			 (splay_tree_key) priority);
  if (!n)
    {
      /* Create a new priority information structure, and insert it
	 into the map.  */
      pi = XNEW (struct priority_info_s);
      pi->initializations_p = 0;
      pi->destructions_p = 0;
      splay_tree_insert (priority_info_map,
			 (splay_tree_key) priority,
			 (splay_tree_value) pi);
    }
  else
    pi = (priority_info) n->value;

  return pi;
}

/* The effective initialization priority of a DECL.  A priority of
   zero (or an unset one) falls back to DEFAULT_INIT_PRIORITY.  */

#define DECL_EFFECTIVE_INIT_PRIORITY(decl)				      \
	((!DECL_HAS_INIT_PRIORITY_P (decl) || DECL_INIT_PRIORITY (decl) == 0) \
	 ? DEFAULT_INIT_PRIORITY : DECL_INIT_PRIORITY (decl))

/* Whether a DECL needs a guard to protect it against multiple
   initialization.  */

#define NEEDS_GUARD_P(decl) (TREE_PUBLIC (decl) && (DECL_COMMON (decl)      \
						    || DECL_ONE_ONLY (decl) \
						    || DECL_WEAK (decl)))

/* Called from one_static_initialization_or_destruction(),
   via walk_tree.
Walks the initializer list of a global variable and looks for temporary variables (DECL_NAME() == NULL and DECL_ARTIFICIAL != 0) and that have their DECL_CONTEXT() == NULL. For each such temporary variable, set their DECL_CONTEXT() to the current function. This is necessary because otherwise some optimizers (enabled by -O2 -fprofile-arcs) might crash when trying to refer to a temporary variable that does not have it's DECL_CONTECT() properly set. */ static tree fix_temporary_vars_context_r (tree *node, int * /*unused*/, void * /*unused1*/) { gcc_assert (current_function_decl); if (TREE_CODE (*node) == BIND_EXPR) { tree var; for (var = BIND_EXPR_VARS (*node); var; var = DECL_CHAIN (var)) if (VAR_P (var) && !DECL_NAME (var) && DECL_ARTIFICIAL (var) && !DECL_CONTEXT (var)) DECL_CONTEXT (var) = current_function_decl; } return NULL_TREE; } /* Set up to handle the initialization or destruction of DECL. If INITP is nonzero, we are initializing the variable. Otherwise, we are destroying it. */ static void one_static_initialization_or_destruction (tree decl, tree init, bool initp) { tree guard_if_stmt = NULL_TREE; tree guard; /* If we are supposed to destruct and there's a trivial destructor, nothing has to be done. */ if (!initp && TYPE_HAS_TRIVIAL_DESTRUCTOR (TREE_TYPE (decl))) return; /* Trick the compiler into thinking we are at the file and line where DECL was declared so that error-messages make sense, and so that the debugger will show somewhat sensible file and line information. */ input_location = DECL_SOURCE_LOCATION (decl); /* Make sure temporary variables in the initialiser all have their DECL_CONTEXT() set to a value different from NULL_TREE. This can happen when global variables initializers are built. In that case, the DECL_CONTEXT() of the global variables _AND_ of all the temporary variables that might have been generated in the accompanying initializers is NULL_TREE, meaning the variables have been declared in the global namespace. 
What we want to do here is to fix that and make sure the DECL_CONTEXT() of the temporaries are set to the current function decl. */ cp_walk_tree_without_duplicates (&init, fix_temporary_vars_context_r, NULL); /* Because of: [class.access.spec] Access control for implicit calls to the constructors, the conversion functions, or the destructor called to create and destroy a static data member is performed as if these calls appeared in the scope of the member's class. we pretend we are in a static member function of the class of which the DECL is a member. */ if (member_p (decl)) { DECL_CONTEXT (current_function_decl) = DECL_CONTEXT (decl); DECL_STATIC_FUNCTION_P (current_function_decl) = 1; } /* Assume we don't need a guard. */ guard = NULL_TREE; /* We need a guard if this is an object with external linkage that might be initialized in more than one place. (For example, a static data member of a template, when the data member requires construction.) */ if (NEEDS_GUARD_P (decl)) { tree guard_cond; guard = get_guard (decl); /* When using __cxa_atexit, we just check the GUARD as we would for a local static. */ if (flag_use_cxa_atexit) { /* When using __cxa_atexit, we never try to destroy anything from a static destructor. */ gcc_assert (initp); guard_cond = get_guard_cond (guard, false); } /* If we don't have __cxa_atexit, then we will be running destructors from .fini sections, or their equivalents. So, we need to know how many times we've tried to initialize this object. We do initializations only if the GUARD is zero, i.e., if we are the first to initialize the variable. We do destructions only if the GUARD is one, i.e., if we are the last to destroy the variable. 
*/ else if (initp) guard_cond = cp_build_binary_op (input_location, EQ_EXPR, cp_build_unary_op (PREINCREMENT_EXPR, guard, /*noconvert=*/true, tf_warning_or_error), integer_one_node, tf_warning_or_error); else guard_cond = cp_build_binary_op (input_location, EQ_EXPR, cp_build_unary_op (PREDECREMENT_EXPR, guard, /*noconvert=*/true, tf_warning_or_error), integer_zero_node, tf_warning_or_error); guard_if_stmt = begin_if_stmt (); finish_if_stmt_cond (guard_cond, guard_if_stmt); } /* If we're using __cxa_atexit, we have not already set the GUARD, so we must do so now. */ if (guard && initp && flag_use_cxa_atexit) finish_expr_stmt (set_guard (guard)); /* Perform the initialization or destruction. */ if (initp) { if (init) { finish_expr_stmt (init); if (sanitize_flags_p (SANITIZE_ADDRESS, decl)) { varpool_node *vnode = varpool_node::get (decl); if (vnode) vnode->dynamically_initialized = 1; } } /* If we're using __cxa_atexit, register a function that calls the destructor for the object. */ if (flag_use_cxa_atexit) finish_expr_stmt (register_dtor_fn (decl)); } else finish_expr_stmt (build_cleanup (decl)); /* Finish the guard if-stmt, if necessary. */ if (guard) { finish_then_clause (guard_if_stmt); finish_if_stmt (guard_if_stmt); } /* Now that we're done with DECL we don't need to pretend to be a member of its class any longer. */ DECL_CONTEXT (current_function_decl) = NULL_TREE; DECL_STATIC_FUNCTION_P (current_function_decl) = 0; } /* Generate code to do the initialization or destruction of the decls in VARS, a TREE_LIST of VAR_DECL with static storage duration. Whether initialization or destruction is performed is specified by INITP. */ static void do_static_initialization_or_destruction (tree vars, bool initp) { tree node, init_if_stmt, cond; /* Build the outer if-stmt to check for initialization or destruction. */ init_if_stmt = begin_if_stmt (); cond = initp ? 
integer_one_node : integer_zero_node; cond = cp_build_binary_op (input_location, EQ_EXPR, initialize_p_decl, cond, tf_warning_or_error); finish_if_stmt_cond (cond, init_if_stmt); /* To make sure dynamic construction doesn't access globals from other compilation units where they might not be yet constructed, for -fsanitize=address insert __asan_before_dynamic_init call that prevents access to either all global variables that need construction in other compilation units, or at least those that haven't been initialized yet. Variables that need dynamic construction in the current compilation unit are kept accessible. */ if (initp && (flag_sanitize & SANITIZE_ADDRESS)) finish_expr_stmt (asan_dynamic_init_call (/*after_p=*/false)); node = vars; do { tree decl = TREE_VALUE (node); tree priority_if_stmt; int priority; priority_info pi; /* If we don't need a destructor, there's nothing to do. Avoid creating a possibly empty if-stmt. */ if (!initp && TYPE_HAS_TRIVIAL_DESTRUCTOR (TREE_TYPE (decl))) { node = TREE_CHAIN (node); continue; } /* Remember that we had an initialization or finalization at this priority. */ priority = DECL_EFFECTIVE_INIT_PRIORITY (decl); pi = get_priority_info (priority); if (initp) pi->initializations_p = 1; else pi->destructions_p = 1; /* Conditionalize this initialization on being in the right priority and being initializing/finalizing appropriately. */ priority_if_stmt = begin_if_stmt (); cond = cp_build_binary_op (input_location, EQ_EXPR, priority_decl, build_int_cst (NULL_TREE, priority), tf_warning_or_error); finish_if_stmt_cond (cond, priority_if_stmt); /* Process initializers with same priority. */ for (; node && DECL_EFFECTIVE_INIT_PRIORITY (TREE_VALUE (node)) == priority; node = TREE_CHAIN (node)) /* Do one initialization or destruction. */ one_static_initialization_or_destruction (TREE_VALUE (node), TREE_PURPOSE (node), initp); /* Finish up the priority if-stmt body. 
 */
      finish_then_clause (priority_if_stmt);
      finish_if_stmt (priority_if_stmt);
    }
  while (node);

  /* Revert what __asan_before_dynamic_init did by calling
     __asan_after_dynamic_init.  */
  if (initp && (flag_sanitize & SANITIZE_ADDRESS))
    finish_expr_stmt (asan_dynamic_init_call (/*after_p=*/true));

  /* Finish up the init/destruct if-stmt body.  */
  finish_then_clause (init_if_stmt);
  finish_if_stmt (init_if_stmt);
}

/* VARS is a list of variables with static storage duration which may
   need initialization and/or finalization.  Remove those variables
   that don't really need to be initialized or finalized, and return
   the resulting list.  The order in which the variables appear in
   VARS is in reverse order of the order in which they should actually
   be initialized.  The list we return is in the unreversed order;
   i.e., the first variable should be initialized first.  */

static tree
prune_vars_needing_no_initialization (tree *vars)
{
  tree *var = vars;
  tree result = NULL_TREE;

  while (*var)
    {
      tree t = *var;
      tree decl = TREE_VALUE (t);
      tree init = TREE_PURPOSE (t);

      /* Deal gracefully with error.  */
      if (error_operand_p (decl))
	{
	  var = &TREE_CHAIN (t);
	  continue;
	}

      /* The only things that can be initialized are variables.  */
      gcc_assert (VAR_P (decl));

      /* If this object is not defined, we don't need to do anything
	 here.  */
      if (DECL_EXTERNAL (decl))
	{
	  var = &TREE_CHAIN (t);
	  continue;
	}

      /* Also, if the initializer already contains errors, we can bail
	 out now.  */
      if (init && TREE_CODE (init) == TREE_LIST
	  && value_member (error_mark_node, init))
	{
	  var = &TREE_CHAIN (t);
	  continue;
	}

      /* This variable is going to need initialization and/or
	 finalization, so we add it to the list.  Unlinking T from
	 *VARS and pushing it onto RESULT reverses the list a second
	 time, yielding the desired initialization order.  */
      *var = TREE_CHAIN (t);
      TREE_CHAIN (t) = result;
      result = t;
    }

  return result;
}

/* Make sure we have told the back end about all the variables in
   VARS.
*/ static void write_out_vars (tree vars) { tree v; for (v = vars; v; v = TREE_CHAIN (v)) { tree var = TREE_VALUE (v); if (!var_finalized_p (var)) { import_export_decl (var); rest_of_decl_compilation (var, 1, 1); } } } /* Generate a static constructor (if CONSTRUCTOR_P) or destructor (otherwise) that will initialize all global objects with static storage duration having the indicated PRIORITY. */ static void generate_ctor_or_dtor_function (bool constructor_p, int priority, location_t *locus) { char function_key; tree fndecl; tree body; size_t i; input_location = *locus; /* ??? */ /* Was: locus->line++; */ /* We use `I' to indicate initialization and `D' to indicate destruction. */ function_key = constructor_p ? 'I' : 'D'; /* We emit the function lazily, to avoid generating empty global constructors and destructors. */ body = NULL_TREE; /* For Objective-C++, we may need to initialize metadata found in this module. This must be done _before_ any other static initializations. */ if (c_dialect_objc () && (priority == DEFAULT_INIT_PRIORITY) && constructor_p && objc_static_init_needed_p ()) { body = start_objects (function_key, priority); objc_generate_static_init_call (NULL_TREE); } /* Call the static storage duration function with appropriate arguments. */ FOR_EACH_VEC_SAFE_ELT (ssdf_decls, i, fndecl) { /* Calls to pure or const functions will expand to nothing. */ if (! (flags_from_decl_or_type (fndecl) & (ECF_CONST | ECF_PURE))) { tree call; if (! body) body = start_objects (function_key, priority); call = cp_build_function_call_nary (fndecl, tf_warning_or_error, build_int_cst (NULL_TREE, constructor_p), build_int_cst (NULL_TREE, priority), NULL_TREE); finish_expr_stmt (call); } } /* Close out the function. */ if (body) finish_objects (function_key, priority, body); } /* Generate constructor and destructor functions for the priority indicated by N. 
*/

/* Splay-tree callback: N maps a priority to its priority_info; DATA is
   the location to attribute the generated functions to.  */

static int
generate_ctor_and_dtor_functions_for_priority (splay_tree_node n, void * data)
{
  location_t *locus = (location_t *) data;
  int priority = (int) n->key;
  priority_info pi = (priority_info) n->value;

  /* Generate the functions themselves, but only if they are really
     needed.  */
  if (pi->initializations_p)
    generate_ctor_or_dtor_function (/*constructor_p=*/true, priority, locus);
  if (pi->destructions_p)
    generate_ctor_or_dtor_function (/*constructor_p=*/false, priority, locus);

  /* Keep iterating.  */
  return 0;
}

/* Return C++ property of T, based on given operation OP.  */

static int
cpp_check (tree t, cpp_operation op)
{
  switch (op)
    {
      case HAS_DEPENDENT_TEMPLATE_ARGS:
	{
	  tree ti = CLASSTYPE_TEMPLATE_INFO (t);
	  if (!ti)
	    return 0;
	  /* Dependency checks must run as if inside a template.  */
	  ++processing_template_decl;
	  const bool dep = any_dependent_template_arguments_p (TI_ARGS (ti));
	  --processing_template_decl;
	  return dep;
	}
      case IS_ABSTRACT:
	return DECL_PURE_VIRTUAL_P (t);
      case IS_CONSTRUCTOR:
	return DECL_CONSTRUCTOR_P (t);
      case IS_DESTRUCTOR:
	return DECL_DESTRUCTOR_P (t);
      case IS_COPY_CONSTRUCTOR:
	return DECL_COPY_CONSTRUCTOR_P (t);
      case IS_MOVE_CONSTRUCTOR:
	return DECL_MOVE_CONSTRUCTOR_P (t);
      case IS_TEMPLATE:
	return TREE_CODE (t) == TEMPLATE_DECL;
      case IS_TRIVIAL:
	return trivial_type_p (t);
      default:
	return 0;
    }
}

/* Collect source file references recursively, starting from NAMESPC.  */

static void
collect_source_refs (tree namespc)
{
  /* Iterate over names in this name space.  */
  for (tree t = NAMESPACE_LEVEL (namespc)->names; t; t = TREE_CHAIN (t))
    if (DECL_IS_BUILTIN (t))
      ;
    else if (TREE_CODE (t) == NAMESPACE_DECL && !DECL_NAMESPACE_ALIAS (t))
      collect_source_refs (t);
    else
      collect_source_ref (DECL_SOURCE_FILE (t));
}

/* Collect decls relevant to SOURCE_FILE from all namespaces recursively,
   starting from NAMESPC.  */

static void
collect_ada_namespace (tree namespc, const char *source_file)
{
  tree decl = NAMESPACE_LEVEL (namespc)->names;

  /* Collect decls from this namespace.  This will skip
     NAMESPACE_DECLs (both aliases and regular, it cannot tell).  */
  collect_ada_nodes (decl, source_file);

  /* Now scan for namespace children, and dump them.  */
  for (; decl; decl = TREE_CHAIN (decl))
    if (TREE_CODE (decl) == NAMESPACE_DECL && !DECL_NAMESPACE_ALIAS (decl))
      collect_ada_namespace (decl, source_file);
}

/* Returns true iff there is a definition available for variable or
   function DECL.  */

bool
decl_defined_p (tree decl)
{
  if (TREE_CODE (decl) == FUNCTION_DECL)
    return (DECL_INITIAL (decl) != NULL_TREE
	    /* A pending instantiation of a friend temploid is defined.  */
	    || (DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (decl)
		&& DECL_INITIAL (DECL_TEMPLATE_RESULT
				 (DECL_TI_TEMPLATE (decl)))));
  else
    {
      gcc_assert (VAR_P (decl));
      return !DECL_EXTERNAL (decl);
    }
}

/* Nonzero for a VAR_DECL whose value can be used in a constant expression.

      [expr.const]

      An integral constant-expression can only involve ... const
      variables of integral or enumeration types initialized with
      constant expressions ...

      C++0x also allows constexpr variables and temporaries initialized
      with constant expressions.  We handle the former here, but the latter
      are just folded away in cxx_eval_constant_expression.

   The standard does not require that the expression be non-volatile.
   G++ implements the proposed correction in DR 457.  */

bool
decl_constant_var_p (tree decl)
{
  if (!decl_maybe_constant_var_p (decl))
    return false;

  /* We don't know if a template static data member is initialized with
     a constant expression until we instantiate its initializer.  Even
     in the case of a constexpr variable, we can't treat it as a
     constant until its initializer is complete in case it's used in
     its own initializer.  */
  maybe_instantiate_decl (decl);
  return DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl);
}

/* Returns true if DECL could be a symbolic constant variable, depending on
   its initializer.
*/

bool
decl_maybe_constant_var_p (tree decl)
{
  tree type = TREE_TYPE (decl);
  if (!VAR_P (decl))
    return false;
  if (DECL_DECLARED_CONSTEXPR_P (decl))
    return true;
  if (DECL_HAS_VALUE_EXPR_P (decl))
    /* A proxy isn't constant.  */
    return false;
  if (TREE_CODE (type) == REFERENCE_TYPE)
    /* References can be constant.  */;
  else if (CP_TYPE_CONST_NON_VOLATILE_P (type)
	   && INTEGRAL_OR_ENUMERATION_TYPE_P (type))
    /* And const integers.  */;
  else
    return false;

  if (DECL_INITIAL (decl)
      && !DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl))
    /* We know the initializer, and it isn't constant.  */
    return false;
  else
    return true;
}

/* Complain that DECL uses a type with no linkage.  In C++98 mode this is
   called from grokfndecl and grokvardecl; in all modes it is called from
   cp_write_global_declarations.  */

void
no_linkage_error (tree decl)
{
  if (cxx_dialect >= cxx11 && decl_defined_p (decl))
    /* In C++11 it's ok if the decl is defined.  */
    return;
  tree t = no_linkage_check (TREE_TYPE (decl), /*relaxed_p=*/false);
  if (t == NULL_TREE)
    /* The type that got us on no_linkage_decls must have gotten a name for
       linkage purposes.  */;
  else if (CLASS_TYPE_P (t) && TYPE_BEING_DEFINED (t))
    /* The type might end up having a typedef name for linkage purposes.  */
    vec_safe_push (no_linkage_decls, decl);
  else if (TYPE_UNNAMED_P (t))
    {
      bool d = false;
      if (cxx_dialect >= cxx11)
	d = permerror (DECL_SOURCE_LOCATION (decl), "%q#D, declared using "
		       "unnamed type, is used but never defined", decl);
      else if (DECL_EXTERN_C_P (decl))
	/* Allow this; it's pretty common in C.  */;
      else if (VAR_P (decl))
	/* DRs 132, 319 and 389 seem to indicate types with no linkage can
	   only be used to declare extern "C" entities.  Since it's not
	   always an error in the ISO C++ 90 Standard, we only issue a
	   warning.  */
	d = warning_at (DECL_SOURCE_LOCATION (decl), 0, "unnamed type "
			"with no linkage used to declare variable %q#D with "
			"linkage", decl);
      else
	d = permerror (DECL_SOURCE_LOCATION (decl), "unnamed type with no "
		       "linkage used to declare function %q#D with linkage",
		       decl);
      if (d && is_typedef_decl (TYPE_NAME (t)))
	inform (DECL_SOURCE_LOCATION (TYPE_NAME (t)), "%q#D does not refer "
		"to the unqualified type, so it is not used for linkage",
		TYPE_NAME (t));
    }
  else if (cxx_dialect >= cxx11)
    {
      if (VAR_P (decl) || !DECL_PURE_VIRTUAL_P (decl))
	permerror (DECL_SOURCE_LOCATION (decl),
		   "%q#D, declared using local type "
		   "%qT, is used but never defined", decl, t);
    }
  else if (VAR_P (decl))
    warning_at (DECL_SOURCE_LOCATION (decl), 0, "type %qT with no linkage "
		"used to declare variable %q#D with linkage", t, decl);
  else
    permerror (DECL_SOURCE_LOCATION (decl), "type %qT with no linkage used "
	       "to declare function %q#D with linkage", t, decl);
}

/* Collect declarations from all namespaces relevant to SOURCE_FILE.  */

static void
collect_all_refs (const char *source_file)
{
  collect_ada_namespace (global_namespace, source_file);
}

/* Clear DECL_EXTERNAL for NODE.  Callback for
   call_for_symbol_thunks_and_aliases; always returns false to keep
   iterating.  */

static bool
clear_decl_external (struct cgraph_node *node, void * /*data*/)
{
  DECL_EXTERNAL (node->decl) = 0;
  return false;
}

/* Build up the function to run dynamic initializers for thread_local
   variables in this translation unit and alias the init functions for the
   individual variables to it.
*/

static void
handle_tls_init (void)
{
  tree vars = prune_vars_needing_no_initialization (&tls_aggregates);
  if (vars == NULL_TREE)
    return;

  location_t loc = DECL_SOURCE_LOCATION (TREE_VALUE (vars));

  write_out_vars (vars);

  /* __tls_guard is a per-thread flag ensuring the initializers run at
     most once per thread.  */
  tree guard = build_decl (loc, VAR_DECL, get_identifier ("__tls_guard"),
			   boolean_type_node);
  TREE_PUBLIC (guard) = false;
  TREE_STATIC (guard) = true;
  DECL_ARTIFICIAL (guard) = true;
  DECL_IGNORED_P (guard) = true;
  TREE_USED (guard) = true;
  CP_DECL_THREAD_LOCAL_P (guard) = true;
  set_decl_tls_model (guard, decl_default_tls_model (guard));
  pushdecl_top_level_and_finish (guard, NULL_TREE);

  tree fn = get_local_tls_init_fn ();
  start_preparsed_function (fn, NULL_TREE, SF_PRE_PARSED);
  tree body = begin_function_body ();
  tree if_stmt = begin_if_stmt ();
  tree cond = cp_build_unary_op (TRUTH_NOT_EXPR, guard, false,
				 tf_warning_or_error);
  finish_if_stmt_cond (cond, if_stmt);
  finish_expr_stmt (cp_build_modify_expr (loc, guard, NOP_EXPR,
					  boolean_true_node,
					  tf_warning_or_error));
  for (; vars; vars = TREE_CHAIN (vars))
    {
      tree var = TREE_VALUE (vars);
      tree init = TREE_PURPOSE (vars);
      one_static_initialization_or_destruction (var, init, true);

      /* Output init aliases even with -fno-extern-tls-init.  */
      if (TARGET_SUPPORTS_ALIASES && TREE_PUBLIC (var))
	{
	  tree single_init_fn = get_tls_init_fn (var);
	  if (single_init_fn == NULL_TREE)
	    continue;
	  cgraph_node *alias
	    = cgraph_node::get_create (fn)->create_same_body_alias
		(single_init_fn, fn);
	  gcc_assert (alias != NULL);
	}
    }

  finish_then_clause (if_stmt);
  finish_if_stmt (if_stmt);
  finish_function_body (body);
  expand_or_defer_fn (finish_function (/*inline_p=*/false));
}

/* We're at the end of compilation, so generate any mangling aliases that
   we've been saving up, if DECL is going to be output and ID2 isn't
   already taken by another declaration.
*/

static void
generate_mangling_alias (tree decl, tree id2)
{
  struct cgraph_node *n = NULL;

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      n = cgraph_node::get (decl);
      if (!n)
	/* Don't create an alias to an unreferenced function.  */
	return;
    }

  tree *slot
    = mangled_decls->find_slot_with_hash (id2, IDENTIFIER_HASH_VALUE (id2),
					  INSERT);

  /* If there's a declaration already using this mangled name,
     don't create a compatibility alias that conflicts.  */
  if (*slot)
    return;

  tree alias = make_alias_for (decl, id2);
  *slot = alias;

  DECL_IGNORED_P (alias) = 1;
  TREE_PUBLIC (alias) = TREE_PUBLIC (decl);
  DECL_VISIBILITY (alias) = DECL_VISIBILITY (decl);
  if (vague_linkage_p (decl))
    DECL_WEAK (alias) = 1;

  /* Register the alias with the symbol table (cgraph for functions,
     varpool for variables).  */
  if (n)
    n->create_same_body_alias (alias, decl);
  else
    varpool_node::create_extra_name_alias (alias, decl);
}

/* Note that we might want to emit an alias with the symbol ID2 for DECL at
   the end of translation, for compatibility across bugs in the mangling
   implementation.  */

void
note_mangling_alias (tree decl, tree id2)
{
  if (TARGET_SUPPORTS_ALIASES)
    {
      if (!defer_mangling_aliases)
	generate_mangling_alias (decl, id2);
      else
	{
	  /* Deferred pairs are pushed as (decl, id2) and popped in
	     reverse order by generate_mangling_aliases.  */
	  vec_safe_push (mangling_aliases, decl);
	  vec_safe_push (mangling_aliases, id2);
	}
    }
}

/* Emit all mangling aliases that were deferred up to this point.  */

void
generate_mangling_aliases ()
{
  while (!vec_safe_is_empty (mangling_aliases))
    {
      tree id2 = mangling_aliases->pop();
      tree decl = mangling_aliases->pop();
      generate_mangling_alias (decl, id2);
    }
  defer_mangling_aliases = false;
}

/* Record a mangling of DECL, whose DECL_ASSEMBLER_NAME has just been set.
   NEED_WARNING is true if we must warn about collisions.  We do this to spot
   changes in mangling that may require compatibility aliases.
*/

void
record_mangling (tree decl, bool need_warning)
{
  if (!mangled_decls)
    mangled_decls = hash_table<mangled_decl_hash>::create_ggc (499);

  gcc_checking_assert (DECL_ASSEMBLER_NAME_SET_P (decl));
  tree id = DECL_ASSEMBLER_NAME_RAW (decl);
  tree *slot
    = mangled_decls->find_slot_with_hash (id, IDENTIFIER_HASH_VALUE (id),
					  INSERT);

  /* If this is already an alias, remove the alias, because the real
     decl takes precedence.  */
  if (*slot && DECL_ARTIFICIAL (*slot) && DECL_IGNORED_P (*slot))
    if (symtab_node *n = symtab_node::get (*slot))
      if (n->cpp_implicit_alias)
	{
	  n->remove ();
	  *slot = NULL_TREE;
	}

  if (!*slot)
    *slot = decl;
  else if (need_warning)
    {
      error_at (DECL_SOURCE_LOCATION (decl),
		"mangling of %q#D as %qE conflicts with a previous mangle",
		decl, id);
      inform (DECL_SOURCE_LOCATION (*slot),
	      "previous mangling %q#D", *slot);
      inform (DECL_SOURCE_LOCATION (decl),
	      "a later -fabi-version= (or =0)"
	      " avoids this error with a change in mangling");
      *slot = decl;
    }
}

/* The mangled name of DECL is being forcibly changed to NAME.  Remove any
   existing knowledge of DECL's mangled name meaning DECL.  */

void
overwrite_mangling (tree decl, tree name)
{
  if (tree id = DECL_ASSEMBLER_NAME_RAW (decl))
    if ((TREE_CODE (decl) == VAR_DECL
	 || TREE_CODE (decl) == FUNCTION_DECL)
	&& mangled_decls)
      if (tree *slot
	  = mangled_decls->find_slot_with_hash (id,
						IDENTIFIER_HASH_VALUE (id),
						NO_INSERT))
	if (*slot == decl)
	  {
	    mangled_decls->clear_slot (slot);

	    /* If this is an alias, remove it from the symbol table.  */
	    if (DECL_ARTIFICIAL (decl) && DECL_IGNORED_P (decl))
	      if (symtab_node *n = symtab_node::get (decl))
		if (n->cpp_implicit_alias)
		  n->remove ();
	  }

  DECL_ASSEMBLER_NAME_RAW (decl) = name;
}

/* The entire file is now complete.  If requested, dump everything
   to a file.
*/

/* Dump the whole translation unit (the global namespace tree) to the
   raw dump file, if -fdump-lang-raw (or equivalent) was given.  */

static void
dump_tu (void)
{
  dump_flags_t flags;
  if (FILE *stream = dump_begin (raw_dump_id, &flags))
    {
      dump_node (global_namespace, flags & ~TDF_SLIM, stream);
      dump_end (raw_dump_id, stream);
    }
}

/* Location recorded when parsing finished; used to attribute
   end-of-compilation diagnostics and generated functions.  */
static location_t locus_at_end_of_parsing;

/* Check the deallocation functions for CODE to see if we want to warn that
   only one was defined.  */

static void
maybe_warn_sized_delete (enum tree_code code)
{
  tree sized = NULL_TREE;
  tree unsized = NULL_TREE;

  for (ovl_iterator iter (get_global_binding (ovl_op_identifier (false,
								 code)));
       iter; ++iter)
    {
      tree fn = *iter;
      /* We're only interested in usual deallocation functions.  */
      if (!usual_deallocation_fn_p (fn))
	continue;
      if (FUNCTION_ARG_CHAIN (fn) == void_list_node)
	unsized = fn;
      else
	sized = fn;
    }
  /* NOTE(review): both SIZED and UNSIZED are dereferenced unchecked;
     this appears to rely on the global operator delete overloads always
     being (implicitly) declared — confirm that invariant holds for all
     CODE values passed in.  */
  if (DECL_INITIAL (unsized) && !DECL_INITIAL (sized))
    warning_at (DECL_SOURCE_LOCATION (unsized), OPT_Wsized_deallocation,
		"the program should also define %qD", sized);
  else if (!DECL_INITIAL (unsized) && DECL_INITIAL (sized))
    warning_at (DECL_SOURCE_LOCATION (sized), OPT_Wsized_deallocation,
		"the program should also define %qD", unsized);
}

/* Check the global deallocation functions to see if we want to warn about
   defining unsized without sized (or vice versa).  */

static void
maybe_warn_sized_delete ()
{
  if (!flag_sized_deallocation || !warn_sized_deallocation)
    return;

  maybe_warn_sized_delete (DELETE_EXPR);
  maybe_warn_sized_delete (VEC_DELETE_EXPR);
}

/* Earlier we left PTRMEM_CST in variable initializers alone so that we could
   look them up when evaluating non-type template parameters.  Now we need to
   lower them to something the back end can understand.  */

static void
lower_var_init ()
{
  varpool_node *node;
  FOR_EACH_VARIABLE (node)
    {
      tree d = node->decl;
      if (tree init = DECL_INITIAL (d))
	DECL_INITIAL (d) = cplus_expand_constant (init);
    }
}

/* This routine is called at the end of compilation.
   Its job is to create all the code needed to initialize and
   destroy the global aggregates.
We do the destruction
   first, since that way we only need to reverse the decls once.  */

void
c_parse_final_cleanups (void)
{
  tree vars;
  bool reconsider;
  size_t i;
  unsigned ssdf_count = 0;
  int retries = 0;
  tree decl;

  locus_at_end_of_parsing = input_location;
  at_eof = 1;

  /* Bad parse errors.  Just forget about it.  */
  if (! global_bindings_p () || current_class_type
      || !vec_safe_is_empty (decl_namespace_list))
    return;

  /* This is the point to write out a PCH if we're doing that.
     In that case we do not want to do anything else.  */
  if (pch_file)
    {
      /* Mangle all symbols at PCH creation time.  */
      symtab_node *node;
      FOR_EACH_SYMBOL (node)
	if (! is_a <varpool_node *> (node)
	    || ! DECL_HARD_REGISTER (node->decl))
	  DECL_ASSEMBLER_NAME (node->decl);
      c_common_write_pch ();
      dump_tu ();
      /* Ensure even the callers don't try to finalize the CU.  */
      flag_syntax_only = 1;
      return;
    }

  timevar_stop (TV_PHASE_PARSING);
  timevar_start (TV_PHASE_DEFERRED);

  symtab->process_same_body_aliases ();

  /* Handle -fdump-ada-spec[-slim] */
  if (flag_dump_ada_spec || flag_dump_ada_spec_slim)
    {
      if (flag_dump_ada_spec_slim)
	collect_source_ref (main_input_filename);
      else
	collect_source_refs (global_namespace);

      dump_ada_specs (collect_all_refs, cpp_check);
    }

  /* FIXME - huh?  was  input_line -= 1;*/

  /* We now have to write out all the stuff we put off writing out.
     These include:

       o Template specializations that we have not yet instantiated,
	 but which are needed.
       o Initialization and destruction for non-local objects with
	 static storage duration.  (Local objects with static storage
	 duration are initialized when their scope is first entered,
	 and are cleaned up via atexit.)
       o Virtual function tables.

     All of these may cause others to be needed.  For example,
     instantiating one function may cause another to be needed, and
     generating the initializer for an object may cause templates to be
     instantiated, etc., etc.  */

  emit_support_tinfos ();

  /* Fixed-point iteration: keep going until a full pass makes no new
     work (RECONSIDER stays false).  */
  do
    {
      tree t;
      tree decl;

      reconsider = false;

      /* If there are templates that we've put off instantiating, do
	 them now.  */
      instantiate_pending_templates (retries);
      ggc_collect ();

      /* Write out virtual tables as required.  Writing out the virtual
	 table for a template class may cause the instantiation of
	 members of that class.  If we write out vtables then we
	 remove the class from our list so we don't have to look at it
	 again.  */
      for (i = keyed_classes->length ();
	   keyed_classes->iterate (--i, &t);)
	if (maybe_emit_vtables (t))
	  {
	    reconsider = true;
	    keyed_classes->unordered_remove (i);
	  }

      /* The input_location may have been changed during marking of
	 vtable entries.  */
      input_location = locus_at_end_of_parsing;

      /* Write out needed type info variables.  We have to be careful
	 looping through unemitted decls, because emit_tinfo_decl may
	 cause other variables to be needed.  New elements will be
	 appended, and we remove from the vector those that actually
	 get emitted.  */
      for (i = unemitted_tinfo_decls->length ();
	   unemitted_tinfo_decls->iterate (--i, &t);)
	if (emit_tinfo_decl (t))
	  {
	    reconsider = true;
	    unemitted_tinfo_decls->unordered_remove (i);
	  }

      /* The list of objects with static storage duration is built up
	 in reverse order.  We clear STATIC_AGGREGATES so that any new
	 aggregates added during the initialization of these will be
	 initialized in the correct order when we next come around the
	 loop.  */
      vars = prune_vars_needing_no_initialization (&static_aggregates);

      if (vars)
	{
	  /* We need to start a new initialization function each time
	     through the loop.  That's because we need to know which
	     vtables have been referenced, and TREE_SYMBOL_REFERENCED
	     isn't computed until a function is finished, and written
	     out.  That's a deficiency in the back end.  When this is
	     fixed, these initialization functions could all become
	     inline, with resulting performance improvements.  */
	  tree ssdf_body;

	  /* Set the line and file, so that it is obviously not from
	     the source file.  */
	  input_location = locus_at_end_of_parsing;
	  ssdf_body = start_static_storage_duration_function (ssdf_count);

	  /* Make sure the back end knows about all the variables.  */
	  write_out_vars (vars);

	  /* First generate code to do all the initializations.  */
	  if (vars)
	    do_static_initialization_or_destruction (vars, /*initp=*/true);

	  /* Then, generate code to do all the destructions.  Do these
	     in reverse order so that the most recently constructed
	     variable is the first destroyed.  If we're using
	     __cxa_atexit, then we don't need to do this; functions
	     were registered at initialization time to destroy the
	     local statics.  */
	  if (!flag_use_cxa_atexit && vars)
	    {
	      vars = nreverse (vars);
	      do_static_initialization_or_destruction (vars, /*initp=*/false);
	    }
	  else
	    vars = NULL_TREE;

	  /* Finish up the static storage duration function for this
	     round.  */
	  input_location = locus_at_end_of_parsing;
	  finish_static_storage_duration_function (ssdf_body);

	  /* All those initializations and finalizations might cause
	     us to need more inline functions, more template
	     instantiations, etc.  */
	  reconsider = true;
	  ssdf_count++;
	  /* ??? was:  locus_at_end_of_parsing.line++; */
	}

      /* Now do the same for thread_local variables.  */
      handle_tls_init ();

      /* Go through the set of inline functions whose bodies have not
	 been emitted yet.  If out-of-line copies of these functions
	 are required, emit them.  */
      FOR_EACH_VEC_SAFE_ELT (deferred_fns, i, decl)
	{
	  /* Does it need synthesizing?  */
	  if (DECL_DEFAULTED_FN (decl)
	      && ! DECL_INITIAL (decl)
	      && (! DECL_REALLY_EXTERN (decl) || possibly_inlined_p (decl)))
	    {
	      /* Even though we're already at the top-level, we push
		 there again.  That way, when we pop back a few lines
		 hence, all of our state is restored.  Otherwise,
		 finish_function doesn't clean things up, and we end
		 up with CURRENT_FUNCTION_DECL set.  */
	      push_to_top_level ();
	      /* The decl's location will mark where it was first
		 needed.  Save that so synthesize method can indicate
		 where it was needed from, in case of error  */
	      input_location = DECL_SOURCE_LOCATION (decl);
	      synthesize_method (decl);
	      pop_from_top_level ();
	      reconsider = true;
	    }

	  if (!DECL_INITIAL (decl) && decl_tls_wrapper_p (decl))
	    generate_tls_wrapper (decl);

	  if (!DECL_SAVED_TREE (decl))
	    continue;

	  cgraph_node *node = cgraph_node::get_create (decl);

	  /* We lie to the back end, pretending that some functions
	     are not defined when they really are.  This keeps these
	     functions from being put out unnecessarily.  But, we must
	     stop lying when the functions are referenced, or if they
	     are not comdat since they need to be put out now.  If
	     DECL_INTERFACE_KNOWN, then we have already set
	     DECL_EXTERNAL appropriately, so there's no need to check
	     again, and we do not want to clear DECL_EXTERNAL if a
	     previous call to import_export_decl set it.

	     This is done in a separate for cycle, because if some
	     deferred function is contained in another deferred
	     function later in deferred_fns varray,
	     rest_of_compilation would skip this function and we
	     really cannot expand the same function twice.  */
	  import_export_decl (decl);
	  if (DECL_NOT_REALLY_EXTERN (decl)
	      && DECL_INITIAL (decl)
	      && decl_needed_p (decl))
	    {
	      if (node->cpp_implicit_alias)
		node = node->get_alias_target ();

	      node->call_for_symbol_thunks_and_aliases (clear_decl_external,
						      NULL, true);
	      /* If we mark !DECL_EXTERNAL one of the symbols in some comdat
		 group, we need to mark all symbols in the same comdat group
		 that way.  */
	      if (node->same_comdat_group)
		for (cgraph_node *next
		       = dyn_cast<cgraph_node *> (node->same_comdat_group);
		     next != node;
		     next = dyn_cast<cgraph_node *> (next->same_comdat_group))
		  next->call_for_symbol_thunks_and_aliases (clear_decl_external,
							    NULL, true);
	    }

	  /* If we're going to need to write this function out, and
	     there's already a body for it, create RTL for it now.
	     (There might be no body if this is a method we haven't
	     gotten around to synthesizing yet.)  */
	  if (!DECL_EXTERNAL (decl)
	      && decl_needed_p (decl)
	      && !TREE_ASM_WRITTEN (decl)
	      && !node->definition)
	    {
	      /* We will output the function; no longer consider it in this
		 loop.  */
	      DECL_DEFER_OUTPUT (decl) = 0;
	      /* Generate RTL for this function now that we know we
		 need it.  */
	      expand_or_defer_fn (decl);
	      /* If we're compiling -fsyntax-only pretend that this
		 function has been written out so that we don't try to
		 expand it again.  */
	      if (flag_syntax_only)
		TREE_ASM_WRITTEN (decl) = 1;
	      reconsider = true;
	    }
	}

      if (wrapup_namespace_globals ())
	reconsider = true;

      /* Static data members are just like namespace-scope globals.  */
      FOR_EACH_VEC_SAFE_ELT (pending_statics, i, decl)
	{
	  if (var_finalized_p (decl)
	      || DECL_REALLY_EXTERN (decl)
	      /* Don't write it out if we haven't seen a definition.  */
	      || (DECL_IN_AGGR_P (decl) && !DECL_INLINE_VAR_P (decl))
	      /* Or haven't instantiated it.  */
	      || (DECL_TEMPLATE_INSTANTIATION (decl)
		  && !DECL_TEMPLATE_INSTANTIATED (decl)))
	    continue;
	  import_export_decl (decl);
	  /* If this static data member is needed, provide it to the
	     back end.  */
	  if (DECL_NOT_REALLY_EXTERN (decl) && decl_needed_p (decl))
	    DECL_EXTERNAL (decl) = 0;
	}
      if (vec_safe_length (pending_statics) != 0
	  && wrapup_global_declarations (pending_statics->address (),
					 pending_statics->length ()))
	reconsider = true;

      retries++;
    }
  while (reconsider);

  lower_var_init ();

  generate_mangling_aliases ();

  /* All used inline functions must have a definition at this point.  */
  FOR_EACH_VEC_SAFE_ELT (deferred_fns, i, decl)
    {
      if (/* Check online inline functions that were actually used.  */
	  DECL_ODR_USED (decl) && DECL_DECLARED_INLINE_P (decl)
	  /* If the definition actually was available here, then the
	     fact that the function was not defined merely represents
	     that for some reason (use of a template repository,
	     #pragma interface, etc.) we decided not to emit the
	     definition here.  */
	  && !DECL_INITIAL (decl)
	  /* Don't complain if the template was defined.  */
	  && !(DECL_TEMPLATE_INSTANTIATION (decl)
	       && DECL_INITIAL (DECL_TEMPLATE_RESULT
				(template_for_substitution (decl)))))
	{
	  warning_at (DECL_SOURCE_LOCATION (decl), 0,
		      "inline function %qD used but never defined", decl);
	  /* Avoid a duplicate warning from check_global_declaration.  */
	  TREE_NO_WARNING (decl) = 1;
	}
    }

  /* So must decls that use a type with no linkage.  */
  FOR_EACH_VEC_SAFE_ELT (no_linkage_decls, i, decl)
    no_linkage_error (decl);

  maybe_warn_sized_delete ();

  /* Then, do the Objective-C stuff.  This is where all the
     Objective-C module stuff gets generated (symtab,
     class/protocol/selector lists etc).  This must be done after C++
     templates, destructors etc. so that selectors used in C++
     templates are properly allocated.  */
  if (c_dialect_objc ())
    objc_write_global_declarations ();

  /* We give C linkage to static constructors and destructors.  */
  push_lang_context (lang_name_c);

  /* Generate initialization and destruction functions for all
     priorities for which they are required.  */
  if (priority_info_map)
    splay_tree_foreach (priority_info_map,
			generate_ctor_and_dtor_functions_for_priority,
			/*data=*/&locus_at_end_of_parsing);
  else if (c_dialect_objc () && objc_static_init_needed_p ())
    /* If this is obj-c++ and we need a static init, call
       generate_ctor_or_dtor_function.  */
    generate_ctor_or_dtor_function (/*constructor_p=*/true,
				    DEFAULT_INIT_PRIORITY,
				    &locus_at_end_of_parsing);

  /* We're done with the splay-tree now.  */
  if (priority_info_map)
    splay_tree_delete (priority_info_map);

  /* Generate any missing aliases.  */
  maybe_apply_pending_pragma_weaks ();

  /* We're done with static constructors, so we can go back to "C++"
     linkage now.  */
  pop_lang_context ();

  if (flag_vtable_verify)
    {
      vtv_recover_class_info ();
      vtv_compute_class_hierarchy_transitive_closure ();
      vtv_build_vtable_verify_fndecl ();
    }

  perform_deferred_noexcept_checks ();

  finish_repo ();
  fini_constexpr ();

  /* The entire file is now complete.  If requested, dump everything
     to a file.  */
  dump_tu ();

  if (flag_detailed_statistics)
    {
      dump_tree_statistics ();
      dump_time_statistics ();
    }

  timevar_stop (TV_PHASE_DEFERRED);
  timevar_start (TV_PHASE_PARSING);

  /* Indicate that we're done with front end processing.  */
  at_eof = 2;
}

/* Perform any post compilation-proper cleanups for the C++ front-end.
   This should really go away.  No front-end should need to do
   anything past the compilation process.  */

void
cxx_post_compilation_parsing_cleanups (void)
{
  timevar_start (TV_PHASE_LATE_PARSING_CLEANUPS);

  if (flag_vtable_verify)
    {
      /* Generate the special constructor initialization function that
         calls __VLTRegisterPairs, and give it a very high
         initialization priority.  This must be done after
         finalize_compilation_unit so that we have accurate
         information about which vtable will actually be emitted.  */
      vtv_generate_init_routine ();
    }

  input_location = locus_at_end_of_parsing;

  if (flag_checking)
    validate_conversion_obstack ();

  timevar_stop (TV_PHASE_LATE_PARSING_CLEANUPS);
}

/* FN is an OFFSET_REF, DOTSTAR_EXPR or MEMBER_REF indicating the
   function to call in parse-tree form; it has not yet been
   semantically analyzed.  ARGS are the arguments to the function.
   They have already been semantically analyzed.  This may change
   ARGS.  */

tree
build_offset_ref_call_from_tree (tree fn, vec<tree, va_gc> **args,
				 tsubst_flags_t complain)
{
  tree orig_fn;
  vec<tree, va_gc> *orig_args = NULL;
  tree expr;
  tree object;

  orig_fn = fn;
  object = TREE_OPERAND (fn, 0);

  if (processing_template_decl)
    {
      gcc_assert (TREE_CODE (fn) == DOTSTAR_EXPR
		  || TREE_CODE (fn) == MEMBER_REF);
      if (type_dependent_expression_p (fn)
	  || any_type_dependent_arguments_p (*args))
	return build_min_nt_call_vec (fn, *args);

      orig_args = make_tree_vector_copy (*args);

      /* Transform the arguments and add the implicit "this"
	 parameter.  That must be done before the FN is transformed
	 because we depend on the form of FN.
*/ make_args_non_dependent (*args); object = build_non_dependent_expr (object); if (TREE_CODE (TREE_TYPE (fn)) == METHOD_TYPE) { if (TREE_CODE (fn) == DOTSTAR_EXPR) object = cp_build_addr_expr (object, complain); vec_safe_insert (*args, 0, object); } /* Now that the arguments are done, transform FN. */ fn = build_non_dependent_expr (fn); } /* A qualified name corresponding to a bound pointer-to-member is represented as an OFFSET_REF: struct B { void g(); }; void (B::*p)(); void B::g() { (this->*p)(); } */ if (TREE_CODE (fn) == OFFSET_REF) { tree object_addr = cp_build_addr_expr (object, complain); fn = TREE_OPERAND (fn, 1); fn = get_member_function_from_ptrfunc (&object_addr, fn, complain); vec_safe_insert (*args, 0, object_addr); } if (CLASS_TYPE_P (TREE_TYPE (fn))) expr = build_op_call (fn, args, complain); else expr = cp_build_function_call_vec (fn, args, complain); if (processing_template_decl && expr != error_mark_node) expr = build_min_non_dep_call_vec (expr, orig_fn, orig_args); if (orig_args != NULL) release_tree_vector (orig_args); return expr; } void check_default_args (tree x) { tree arg = TYPE_ARG_TYPES (TREE_TYPE (x)); bool saw_def = false; int i = 0 - (TREE_CODE (TREE_TYPE (x)) == METHOD_TYPE); for (; arg && arg != void_list_node; arg = TREE_CHAIN (arg), ++i) { if (TREE_PURPOSE (arg)) saw_def = true; else if (saw_def && !PACK_EXPANSION_P (TREE_VALUE (arg))) { error ("default argument missing for parameter %P of %q+#D", i, x); TREE_PURPOSE (arg) = error_mark_node; } } } /* Return true if function DECL can be inlined. This is used to force instantiation of methods that might be interesting for inlining. */ bool possibly_inlined_p (tree decl) { gcc_assert (TREE_CODE (decl) == FUNCTION_DECL); if (DECL_UNINLINABLE (decl)) return false; if (!optimize) return DECL_DECLARED_INLINE_P (decl); /* When optimizing, we might inline everything when flatten attribute or heuristics inlining for size or autoinlining is used. 
*/ return true; } /* Normally, we can wait until instantiation-time to synthesize DECL. However, if DECL is a static data member initialized with a constant or a constexpr function, we need it right now because a reference to such a data member or a call to such function is not value-dependent. For a function that uses auto in the return type, we need to instantiate it to find out its type. For OpenMP user defined reductions, we need them instantiated for reduction clauses which inline them by hand directly. */ static void maybe_instantiate_decl (tree decl) { if (DECL_LANG_SPECIFIC (decl) && DECL_TEMPLATE_INFO (decl) && (decl_maybe_constant_var_p (decl) || (TREE_CODE (decl) == FUNCTION_DECL && DECL_OMP_DECLARE_REDUCTION_P (decl)) || undeduced_auto_decl (decl)) && !DECL_DECLARED_CONCEPT_P (decl) && !uses_template_parms (DECL_TI_ARGS (decl))) { /* Instantiating a function will result in garbage collection. We must treat this situation as if we were within the body of a function so as to avoid collecting live data only referenced from the stack (such as overload resolution candidates). */ ++function_depth; instantiate_decl (decl, /*defer_ok=*/false, /*expl_inst_class_mem_p=*/false); --function_depth; } } /* Mark DECL (either a _DECL or a BASELINK) as "used" in the program. If DECL is a specialization or implicitly declared class member, generate the actual definition. Return false if something goes wrong, true otherwise. */ bool mark_used (tree decl, tsubst_flags_t complain) { /* If we're just testing conversions or resolving overloads, we don't want any permanent effects like forcing functions to be output or instantiating templates. */ if ((complain & tf_conv)) return true; /* If DECL is a BASELINK for a single function, then treat it just like the DECL for the function. Otherwise, if the BASELINK is for an overloaded function, we don't know which function was actually used until after overload resolution. 
*/ if (BASELINK_P (decl)) { decl = BASELINK_FUNCTIONS (decl); if (really_overloaded_fn (decl)) return true; decl = OVL_FIRST (decl); } /* Set TREE_USED for the benefit of -Wunused. */ TREE_USED (decl) = 1; /* And for structured bindings also the underlying decl. */ if (DECL_DECOMPOSITION_P (decl) && DECL_DECOMP_BASE (decl)) TREE_USED (DECL_DECOMP_BASE (decl)) = 1; if (TREE_CODE (decl) == TEMPLATE_DECL) return true; if (DECL_CLONED_FUNCTION_P (decl)) TREE_USED (DECL_CLONED_FUNCTION (decl)) = 1; /* Mark enumeration types as used. */ if (TREE_CODE (decl) == CONST_DECL) used_types_insert (DECL_CONTEXT (decl)); if (TREE_CODE (decl) == FUNCTION_DECL && !maybe_instantiate_noexcept (decl, complain)) return false; if (TREE_CODE (decl) == FUNCTION_DECL && DECL_DELETED_FN (decl)) { if (DECL_ARTIFICIAL (decl) && DECL_CONV_FN_P (decl) && LAMBDA_TYPE_P (DECL_CONTEXT (decl))) /* We mark a lambda conversion op as deleted if we can't generate it properly; see maybe_add_lambda_conv_op. */ sorry ("converting lambda that uses %<...%> to function pointer"); else if (complain & tf_error) { error ("use of deleted function %qD", decl); if (!maybe_explain_implicit_delete (decl)) inform (DECL_SOURCE_LOCATION (decl), "declared here"); } return false; } if (TREE_DEPRECATED (decl) && (complain & tf_warning) && deprecated_state != DEPRECATED_SUPPRESS) warn_deprecated_use (decl, NULL_TREE); /* We can only check DECL_ODR_USED on variables or functions with DECL_LANG_SPECIFIC set, and these are also the only decls that we might need special handling for. */ if (!VAR_OR_FUNCTION_DECL_P (decl) || DECL_LANG_SPECIFIC (decl) == NULL || DECL_THUNK_P (decl)) { if (!processing_template_decl && !require_deduced_type (decl, complain)) return false; return true; } /* We only want to do this processing once. We don't need to keep trying to instantiate inline templates, because unit-at-a-time will make sure we get them compiled before functions that want to inline them. 
*/ if (DECL_ODR_USED (decl)) return true; /* Normally, we can wait until instantiation-time to synthesize DECL. However, if DECL is a static data member initialized with a constant or a constexpr function, we need it right now because a reference to such a data member or a call to such function is not value-dependent. For a function that uses auto in the return type, we need to instantiate it to find out its type. For OpenMP user defined reductions, we need them instantiated for reduction clauses which inline them by hand directly. */ maybe_instantiate_decl (decl); if (processing_template_decl || in_template_function ()) return true; /* Check this too in case we're within instantiate_non_dependent_expr. */ if (DECL_TEMPLATE_INFO (decl) && uses_template_parms (DECL_TI_ARGS (decl))) return true; if (!require_deduced_type (decl, complain)) return false; if (builtin_pack_fn_p (decl)) { error ("use of built-in parameter pack %qD outside of a template", DECL_NAME (decl)); return false; } /* If we don't need a value, then we don't need to synthesize DECL. */ if (cp_unevaluated_operand || in_discarded_stmt) return true; DECL_ODR_USED (decl) = 1; if (DECL_CLONED_FUNCTION_P (decl)) DECL_ODR_USED (DECL_CLONED_FUNCTION (decl)) = 1; /* DR 757: A type without linkage shall not be used as the type of a variable or function with linkage, unless o the variable or function has extern "C" linkage (7.5 [dcl.link]), or o the variable or function is not used (3.2 [basic.def.odr]) or is defined in the same translation unit. */ if (cxx_dialect > cxx98 && decl_linkage (decl) != lk_none && !DECL_EXTERN_C_P (decl) && !DECL_ARTIFICIAL (decl) && !decl_defined_p (decl) && no_linkage_check (TREE_TYPE (decl), /*relaxed_p=*/false)) { if (is_local_extern (decl)) /* There's no way to define a local extern, and adding it to the vector interferes with GC, so give an error now. 
*/ no_linkage_error (decl); else vec_safe_push (no_linkage_decls, decl); } if (TREE_CODE (decl) == FUNCTION_DECL && DECL_DECLARED_INLINE_P (decl) && !DECL_INITIAL (decl) && !DECL_ARTIFICIAL (decl)) /* Remember it, so we can check it was defined. */ note_vague_linkage_fn (decl); /* Is it a synthesized method that needs to be synthesized? */ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_NONSTATIC_MEMBER_FUNCTION_P (decl) && DECL_DEFAULTED_FN (decl) /* A function defaulted outside the class is synthesized either by cp_finish_decl or instantiate_decl. */ && !DECL_DEFAULTED_OUTSIDE_CLASS_P (decl) && ! DECL_INITIAL (decl)) { /* Defer virtual destructors so that thunks get the right linkage. */ if (DECL_VIRTUAL_P (decl) && !at_eof) { note_vague_linkage_fn (decl); return true; } /* Remember the current location for a function we will end up synthesizing. Then we can inform the user where it was required in the case of error. */ DECL_SOURCE_LOCATION (decl) = input_location; /* Synthesizing an implicitly defined member function will result in garbage collection. We must treat this situation as if we were within the body of a function so as to avoid collecting live data on the stack (such as overload resolution candidates). We could just let cp_write_global_declarations handle synthesizing this function by adding it to deferred_fns, but doing it at the use site produces better error messages. */ ++function_depth; synthesize_method (decl); --function_depth; /* If this is a synthesized method we don't need to do the instantiation test below. */ } else if (VAR_OR_FUNCTION_DECL_P (decl) && DECL_TEMPLATE_INFO (decl) && !DECL_DECLARED_CONCEPT_P (decl) && (!DECL_EXPLICIT_INSTANTIATION (decl) || always_instantiate_p (decl))) /* If this is a function or variable that is an instance of some template, we now know that we will need to actually do the instantiation. We check that DECL is not an explicit instantiation because that is not checked in instantiate_decl. 
   We put off instantiating functions in order to improve compile
     times.  Maintaining a stack of active functions is expensive, and
     the inliner knows to instantiate any functions it might need.
     Therefore, we always try to defer instantiation.  */
    {
      ++function_depth;
      instantiate_decl (decl, /*defer_ok=*/true,
			/*expl_inst_class_mem_p=*/false);
      --function_depth;
    }

  return true;
}

/* Mark DECL as used with the default diagnostic mode: complaints about
   deleted, deprecated, or otherwise problematic uses are emitted as
   ordinary warnings or errors.  Thin wrapper over the two-argument
   overload above.  */

bool
mark_used (tree decl)
{
  return mark_used (decl, tf_warning_or_error);
}

/* Begin a global constructor function for the vtable-verification
   ('I') initializers, at one below the maximum reserved init
   priority so it runs before user constructors.  */

tree
vtv_start_verification_constructor_init_function (void)
{
  return start_objects ('I', MAX_RESERVED_INIT_PRIORITY - 1);
}

/* Finish the vtable-verification constructor-init function whose body is
   FUNCTION_BODY: close the compound statement, finish the function, mark
   it as a static constructor, and record its init priority.  Returns the
   completed FUNCTION_DECL.  */

tree
vtv_finish_verification_constructor_init_function (tree function_body)
{
  tree fn;

  finish_compound_stmt (function_body);
  fn = finish_function (/*inline_p=*/false);
  DECL_STATIC_CONSTRUCTOR (fn) = 1;
  decl_init_priority_insert (fn, MAX_RESERVED_INIT_PRIORITY - 1);

  return fn;
}

/* Garbage-collector roots generated for this translation unit.  */
#include "gt-cp-decl2.h"
DRB049-fprintf-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <stdio.h> #include <stdlib.h> /* Example use of fprintf */ #include <stdio.h> int main(int argc, char* argv[]) { int i; int ret; FILE* pfile; int len=1000; int A[1000]; #pragma omp target data map(from:A[0:1000]) #pragma omp target parallel for for (i=0; i<len; i++) A[i]=i; pfile = fopen("mytempfile.txt","a+"); if (pfile ==NULL) { fprintf(stderr,"Error in fopen()\n"); } for (i=0; i<len; ++i) { fprintf(pfile, "%d\n", A[i] ); } fclose(pfile); ret = remove("mytempfile.txt"); if (ret != 0) { fprintf(stderr, "Error: unable to delete mytempfile.txt\n"); } return 0; }
GB_unop__identity_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_fp32_fp32
// op(A') function:  GB_unop_tran__identity_fp32_fp32

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = aij

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    1

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the identity operator elementwise to the anz entries of Ax,
// writing the result into Cx.  Returns GrB_NO_VALUE when this kernel is
// compiled out (GB_DISABLE), GrB_SUCCESS otherwise.

GrB_Info GB_unop_apply__identity_fp32_fp32
(
    float *Cx,               // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // all entries present: since this op is the identity with no
        // typecast, the whole array can be copied with one memcpy
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        // (entries with Ab [p] == 0 are absent and skipped)
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop body lives in the shared template GB_unop_transpose.c,
// which expands using the GB_* macros defined above.

GrB_Info GB_unop_tran__identity_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__lxor_int64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__lxor_int64)
// A.*B function (eWiseMult):       GB (_AemultB)
// A.*B function (eWiseMult):       GB (_AemultB_02__lxor_int64)
// A.*B function (eWiseMult):       GB (_AemultB_03__lxor_int64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__lxor_int64)
// A*D function (colscale):         GB (_AxD__lxor_int64)
// D*A function (rowscale):         GB (_DxB__lxor_int64)
// C+=B function (dense accum):     GB (_Cdense_accumB__lxor_int64)
// C+=b function (dense accum):     GB (_Cdense_accumb__lxor_int64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__lxor_int64)
// C=scalar+B                       GB (_bind1st__lxor_int64)
// C=scalar+B'                      GB (_bind1st_tran__lxor_int64)
// C=A+scalar                       GB (_bind2nd__lxor_int64)
// C=A'+scalar                      GB (_bind2nd_tran__lxor_int64)

// C type:   int64_t
// A type:   int64_t
// B,b type: int64_t
// BinaryOp: cij = ((aij != 0) != (bij != 0))

// NOTE(review): the macros below parameterize the shared #include'd
// templates; the per-entry work for LXOR on int64 is the logical-xor of
// the operands' nonzero-ness, stored as int64_t 0 or 1.

#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int64_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB)  \
    int64_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    int64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = ((x != 0) != (y != 0)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LXOR || GxB_NO_INT64 || GxB_NO_LXOR_INT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (LXOR is none of these, so this variant is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__lxor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__lxor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__lxor_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // note: unreachable; the generator emits a duplicate return here
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__lxor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__lxor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__lxor_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix slicing workspace, freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__lxor_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__lxor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (LXOR is commutative, so GB_BINOP_FLIP is 0 and this branch is used.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__lxor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__lxor_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__lxor_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t   x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap (GBB handles Bb == NULL)
        if (!GBB (Bb, p)) continue ;
        int64_t bij = Bx [p] ;
        Cx [p] = ((x != 0) != (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__lxor_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t   y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int64_t aij = Ax [p] ;
        Cx [p] = ((aij != 0) != (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int64_t aij = Ax [pA] ;  \
    Cx [pC] = ((x != 0) != (aij != 0)) ;  \
}

GrB_Info GB (_bind1st_tran__lxor_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int64_t aij = Ax [pA] ;  \
    Cx [pC] = ((aij != 0) != (y != 0)) ;  \
}

GrB_Info GB (_bind2nd_tran__lxor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
exact_rhs.c
//-------------------------------------------------------------------------//
//                                                                         //
//  This benchmark is a serial C version of the NPB SP code. This C        //
//  version is developed by the Center for Manycore Programming at Seoul   //
//  National University and derived from the serial Fortran versions in    //
//  "NPB3.3-SER" developed by NAS.                                         //
//                                                                         //
//  Permission to use, copy, distribute and modify this software for any   //
//  purpose with or without fee is hereby granted. This software is        //
//  provided "as is" without express or implied warranty.                  //
//                                                                         //
//  Information on NPB 3.3, including the technical report, the original   //
//  specifications, source code, results and information on how to submit  //
//  new results, is available at:                                          //
//                                                                         //
//           http://www.nas.nasa.gov/Software/NPB/                         //
//                                                                         //
//  Send comments or suggestions for this C version to cmp@aces.snu.ac.kr  //
//                                                                         //
//          Center for Manycore Programming                                //
//          School of Computer Science and Engineering                     //
//          Seoul National University                                      //
//          Seoul 151-744, Korea                                           //
//                                                                         //
//          E-mail:  cmp@aces.snu.ac.kr                                    //
//                                                                         //
//-------------------------------------------------------------------------//

//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo,    //
//          and Jaejin Lee                                                 //
//-------------------------------------------------------------------------//

#include "header.h"

//---------------------------------------------------------------------
// compute the right hand side based on exact solution
//---------------------------------------------------------------------
// Fills the global 'forcing' array from the analytic exact_solution(),
// applying flux differences and fourth-order dissipation in each of the
// xi, eta, and zeta directions, then negates the result and pushes it to
// the OpenMP target device.
//
// NOTE(review): 'forcing', 'ue', 'buf', 'cuf', 'q', 'grid_points', and the
// d*/t*/c*/*con* coefficients are globals declared in header.h; 'ue'/'buf'/
// 'cuf'/'q' are scratch arrays reused as 1-D pencils in each sweep
// direction -- presumably sized for the largest grid dimension (confirm
// against header.h).
void exact_rhs()
{
  double dtemp[5], xi, eta, zeta, dtpp;
  int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1;

  //---------------------------------------------------------------------
  // initialize
  //---------------------------------------------------------------------
  // zero the whole forcing array, including boundary points
  for (k = 0; k <= grid_points[2]-1; k++) {
    for (j = 0; j <= grid_points[1]-1; j++) {
      for (i = 0; i <= grid_points[0]-1; i++) {
        for (m = 0; m < 5; m++) {
          forcing[m][k][j][i] = 0.0;
        }
      }
    }
  }

  //---------------------------------------------------------------------
  // xi-direction flux differences
  //---------------------------------------------------------------------
  for (k = 1; k <= grid_points[2]-2; k++) {
    zeta = (double)k * dnzm1;
    for (j = 1; j <= grid_points[1]-2; j++) {
      eta = (double)j * dnym1;

      // evaluate the exact solution along the i-pencil and precompute
      // velocity ratios (buf), kinetic terms (cuf) and pressure-like q
      for (i = 0; i <= grid_points[0]-1; i++) {
        xi = (double)i * dnxm1;
        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[i][m] = dtemp[m];
        }
        dtpp = 1.0 / dtemp[0];
        for (m = 1; m < 5; m++) {
          buf[i][m] = dtpp * dtemp[m];
        }
        cuf[i]    = buf[i][1] * buf[i][1];
        buf[i][0] = cuf[i] + buf[i][2] * buf[i][2] + buf[i][3] * buf[i][3];
        q[i] = 0.5*(buf[i][1]*ue[i][1] + buf[i][2]*ue[i][2] +
                    buf[i][3]*ue[i][3]);
      }

      // second-order central flux differences in i
      for (i = 1; i <= grid_points[0]-2; i++) {
        im1 = i-1;
        ip1 = i+1;

        forcing[0][k][j][i] = forcing[0][k][j][i] -
          tx2*( ue[ip1][1]-ue[im1][1] )+
          dx1tx1*(ue[ip1][0]-2.0*ue[i][0]+ue[im1][0]);

        forcing[1][k][j][i] = forcing[1][k][j][i] - tx2 * (
            (ue[ip1][1]*buf[ip1][1]+c2*(ue[ip1][4]-q[ip1]))-
            (ue[im1][1]*buf[im1][1]+c2*(ue[im1][4]-q[im1])))+
          xxcon1*(buf[ip1][1]-2.0*buf[i][1]+buf[im1][1])+
          dx2tx1*( ue[ip1][1]-2.0* ue[i][1]+ue[im1][1]);

        forcing[2][k][j][i] = forcing[2][k][j][i] - tx2 * (
            ue[ip1][2]*buf[ip1][1]-ue[im1][2]*buf[im1][1])+
          xxcon2*(buf[ip1][2]-2.0*buf[i][2]+buf[im1][2])+
          dx3tx1*( ue[ip1][2]-2.0*ue[i][2] +ue[im1][2]);

        forcing[3][k][j][i] = forcing[3][k][j][i] -
          tx2*( ue[ip1][3]*buf[ip1][1]-ue[im1][3]*buf[im1][1])+
          xxcon2*(buf[ip1][3]-2.0*buf[i][3]+buf[im1][3])+
          dx4tx1*( ue[ip1][3]-2.0* ue[i][3]+ ue[im1][3]);

        forcing[4][k][j][i] = forcing[4][k][j][i] -
          tx2*( buf[ip1][1]*(c1*ue[ip1][4]-c2*q[ip1])-
                buf[im1][1]*(c1*ue[im1][4]-c2*q[im1]))+
          0.5*xxcon3*(buf[ip1][0]-2.0*buf[i][0]+buf[im1][0])+
          xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+
          xxcon5*(buf[ip1][4]-2.0*buf[i][4]+buf[im1][4])+
          dx5tx1*( ue[ip1][4]-2.0* ue[i][4]+ ue[im1][4]);
      }

      //---------------------------------------------------------------------
      // Fourth-order dissipation
      //---------------------------------------------------------------------
      // one-sided stencils at the near boundary (i = 1, 2)
      for (m = 0; m < 5; m++) {
        i = 1;
        forcing[m][k][j][i] = forcing[m][k][j][i] - dssp *
          (5.0*ue[i][m] - 4.0*ue[i+1][m] +ue[i+2][m]);
        i = 2;
        forcing[m][k][j][i] = forcing[m][k][j][i] - dssp *
          (-4.0*ue[i-1][m] + 6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]);
      }

      // interior 5-point dissipation stencil
      for (i = 3; i <= grid_points[0]-4; i++) {
        for (m = 0; m < 5; m++) {
          forcing[m][k][j][i] = forcing[m][k][j][i] - dssp*
            (ue[i-2][m] - 4.0*ue[i-1][m] + 6.0*ue[i][m] -
             4.0*ue[i+1][m] + ue[i+2][m]);
        }
      }

      // one-sided stencils at the far boundary
      for (m = 0; m < 5; m++) {
        i = grid_points[0]-3;
        forcing[m][k][j][i] = forcing[m][k][j][i] - dssp *
          (ue[i-2][m] - 4.0*ue[i-1][m] + 6.0*ue[i][m] - 4.0*ue[i+1][m]);
        i = grid_points[0]-2;
        forcing[m][k][j][i] = forcing[m][k][j][i] - dssp *
          (ue[i-2][m] - 4.0*ue[i-1][m] + 5.0*ue[i][m]);
      }
    }
  }

  //---------------------------------------------------------------------
  // eta-direction flux differences
  //---------------------------------------------------------------------
  // same structure as the xi sweep, but along j-pencils
  for (k = 1; k <= grid_points[2]-2; k++) {
    zeta = (double)k * dnzm1;
    for (i = 1; i <= grid_points[0]-2; i++) {
      xi = (double)i * dnxm1;

      for (j = 0; j <= grid_points[1]-1; j++) {
        eta = (double)j * dnym1;
        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[j][m] = dtemp[m];
        }
        dtpp = 1.0/dtemp[0];
        for (m = 1; m < 5; m++) {
          buf[j][m] = dtpp * dtemp[m];
        }
        cuf[j]    = buf[j][2] * buf[j][2];
        buf[j][0] = cuf[j] + buf[j][1] * buf[j][1] + buf[j][3] * buf[j][3];
        q[j] = 0.5*(buf[j][1]*ue[j][1] + buf[j][2]*ue[j][2] +
                    buf[j][3]*ue[j][3]);
      }

      for (j = 1; j <= grid_points[1]-2; j++) {
        jm1 = j-1;
        jp1 = j+1;

        forcing[0][k][j][i] = forcing[0][k][j][i] -
          ty2*( ue[jp1][2]-ue[jm1][2] )+
          dy1ty1*(ue[jp1][0]-2.0*ue[j][0]+ue[jm1][0]);

        forcing[1][k][j][i] = forcing[1][k][j][i] - ty2*(
            ue[jp1][1]*buf[jp1][2]-ue[jm1][1]*buf[jm1][2])+
          yycon2*(buf[jp1][1]-2.0*buf[j][1]+buf[jm1][1])+
          dy2ty1*( ue[jp1][1]-2.0* ue[j][1]+ ue[jm1][1]);

        forcing[2][k][j][i] = forcing[2][k][j][i] - ty2*(
            (ue[jp1][2]*buf[jp1][2]+c2*(ue[jp1][4]-q[jp1]))-
            (ue[jm1][2]*buf[jm1][2]+c2*(ue[jm1][4]-q[jm1])))+
          yycon1*(buf[jp1][2]-2.0*buf[j][2]+buf[jm1][2])+
          dy3ty1*( ue[jp1][2]-2.0*ue[j][2] +ue[jm1][2]);

        forcing[3][k][j][i] = forcing[3][k][j][i] - ty2*(
            ue[jp1][3]*buf[jp1][2]-ue[jm1][3]*buf[jm1][2])+
          yycon2*(buf[jp1][3]-2.0*buf[j][3]+buf[jm1][3])+
          dy4ty1*( ue[jp1][3]-2.0*ue[j][3]+ ue[jm1][3]);

        forcing[4][k][j][i] = forcing[4][k][j][i] - ty2*(
            buf[jp1][2]*(c1*ue[jp1][4]-c2*q[jp1])-
            buf[jm1][2]*(c1*ue[jm1][4]-c2*q[jm1]))+
          0.5*yycon3*(buf[jp1][0]-2.0*buf[j][0]+ buf[jm1][0])+
          yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+
          yycon5*(buf[jp1][4]-2.0*buf[j][4]+buf[jm1][4])+
          dy5ty1*(ue[jp1][4]-2.0*ue[j][4]+ue[jm1][4]);
      }

      //---------------------------------------------------------------------
      // Fourth-order dissipation
      //---------------------------------------------------------------------
      for (m = 0; m < 5; m++) {
        j = 1;
        forcing[m][k][j][i] = forcing[m][k][j][i] - dssp *
          (5.0*ue[j][m] - 4.0*ue[j+1][m] +ue[j+2][m]);
        j = 2;
        forcing[m][k][j][i] = forcing[m][k][j][i] - dssp *
          (-4.0*ue[j-1][m] + 6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]);
      }

      for (j = 3; j <= grid_points[1]-4; j++) {
        for (m = 0; m < 5; m++) {
          forcing[m][k][j][i] = forcing[m][k][j][i] - dssp*
            (ue[j-2][m] - 4.0*ue[j-1][m] + 6.0*ue[j][m] -
             4.0*ue[j+1][m] + ue[j+2][m]);
        }
      }

      for (m = 0; m < 5; m++) {
        j = grid_points[1]-3;
        forcing[m][k][j][i] = forcing[m][k][j][i] - dssp *
          (ue[j-2][m] - 4.0*ue[j-1][m] + 6.0*ue[j][m] - 4.0*ue[j+1][m]);
        j = grid_points[1]-2;
        forcing[m][k][j][i] = forcing[m][k][j][i] - dssp *
          (ue[j-2][m] - 4.0*ue[j-1][m] + 5.0*ue[j][m]);
      }
    }
  }

  //---------------------------------------------------------------------
  // zeta-direction flux differences
  //---------------------------------------------------------------------
  // same structure again, along k-pencils
  for (j = 1; j <= grid_points[1]-2; j++) {
    eta = (double)j * dnym1;
    for (i = 1; i <= grid_points[0]-2; i++) {
      xi = (double)i * dnxm1;

      for (k = 0; k <= grid_points[2]-1; k++) {
        zeta = (double)k * dnzm1;
        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[k][m] = dtemp[m];
        }
        dtpp = 1.0/dtemp[0];
        for (m = 1; m < 5; m++) {
          buf[k][m] = dtpp * dtemp[m];
        }
        cuf[k]    = buf[k][3] * buf[k][3];
        buf[k][0] = cuf[k] + buf[k][1] * buf[k][1] + buf[k][2] * buf[k][2];
        q[k] = 0.5*(buf[k][1]*ue[k][1] + buf[k][2]*ue[k][2] +
                    buf[k][3]*ue[k][3]);
      }

      for (k = 1; k <= grid_points[2]-2; k++) {
        km1 = k-1;
        kp1 = k+1;

        forcing[0][k][j][i] = forcing[0][k][j][i] -
          tz2*( ue[kp1][3]-ue[km1][3] )+
          dz1tz1*(ue[kp1][0]-2.0*ue[k][0]+ue[km1][0]);

        forcing[1][k][j][i] = forcing[1][k][j][i] - tz2 * (
            ue[kp1][1]*buf[kp1][3]-ue[km1][1]*buf[km1][3])+
          zzcon2*(buf[kp1][1]-2.0*buf[k][1]+buf[km1][1])+
          dz2tz1*( ue[kp1][1]-2.0* ue[k][1]+ ue[km1][1]);

        forcing[2][k][j][i] = forcing[2][k][j][i] - tz2 * (
            ue[kp1][2]*buf[kp1][3]-ue[km1][2]*buf[km1][3])+
          zzcon2*(buf[kp1][2]-2.0*buf[k][2]+buf[km1][2])+
          dz3tz1*(ue[kp1][2]-2.0*ue[k][2]+ue[km1][2]);

        forcing[3][k][j][i] = forcing[3][k][j][i] - tz2 * (
            (ue[kp1][3]*buf[kp1][3]+c2*(ue[kp1][4]-q[kp1]))-
            (ue[km1][3]*buf[km1][3]+c2*(ue[km1][4]-q[km1])))+
          zzcon1*(buf[kp1][3]-2.0*buf[k][3]+buf[km1][3])+
          dz4tz1*( ue[kp1][3]-2.0*ue[k][3] +ue[km1][3]);

        forcing[4][k][j][i] = forcing[4][k][j][i] - tz2 * (
            buf[kp1][3]*(c1*ue[kp1][4]-c2*q[kp1])-
            buf[km1][3]*(c1*ue[km1][4]-c2*q[km1]))+
          0.5*zzcon3*(buf[kp1][0]-2.0*buf[k][0]+buf[km1][0])+
          zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+
          zzcon5*(buf[kp1][4]-2.0*buf[k][4]+buf[km1][4])+
          dz5tz1*( ue[kp1][4]-2.0*ue[k][4]+ ue[km1][4]);
      }

      //---------------------------------------------------------------------
      // Fourth-order dissipation
      //---------------------------------------------------------------------
      for (m = 0; m < 5; m++) {
        k = 1;
        forcing[m][k][j][i] = forcing[m][k][j][i] - dssp *
          (5.0*ue[k][m] - 4.0*ue[k+1][m] +ue[k+2][m]);
        k = 2;
        forcing[m][k][j][i] = forcing[m][k][j][i] - dssp *
          (-4.0*ue[k-1][m] + 6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]);
      }

      for (k = 3; k <= grid_points[2]-4; k++) {
        for (m = 0; m < 5; m++) {
          forcing[m][k][j][i] = forcing[m][k][j][i] - dssp*
            (ue[k-2][m] - 4.0*ue[k-1][m] + 6.0*ue[k][m] -
             4.0*ue[k+1][m] + ue[k+2][m]);
        }
      }

      for (m = 0; m < 5; m++) {
        k = grid_points[2]-3;
        forcing[m][k][j][i] = forcing[m][k][j][i] - dssp *
          (ue[k-2][m] - 4.0*ue[k-1][m] + 6.0*ue[k][m] - 4.0*ue[k+1][m]);
        k = grid_points[2]-2;
        forcing[m][k][j][i] = forcing[m][k][j][i] - dssp *
          (ue[k-2][m] - 4.0*ue[k-1][m] + 5.0*ue[k][m]);
      }
    }
  }

  //---------------------------------------------------------------------
  // now change the sign of the forcing function,
  //---------------------------------------------------------------------
  for (k = 1; k <= grid_points[2]-2; k++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (i = 1; i <= grid_points[0]-2; i++) {
        for (m = 0; m < 5; m++) {
          forcing[m][k][j][i] = -1.0 * forcing[m][k][j][i];
        }
      }
    }
  }

  // copy the host-computed forcing array to the target device
#pragma omp target update to(forcing)
}
shear.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS H H EEEEE AAA RRRR % % SS H H E A A R R % % SSS HHHHH EEE AAAAA RRRR % % SS H H E A A R R % % SSSSS H H EEEEE A A R R % % % % % % MagickCore Methods to Shear or Rotate an Image by an Arbitrary Angle % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The XShearImage() and YShearImage() methods are based on the paper "A Fast % Algorithm for General Raster Rotation" by Alan W. Paeth, Graphics % Interface '86 (Vancouver). ShearRotateImage() is adapted from a similar % method based on the Paeth paper written by Michael Halle of the Spatial % Imaging Group, MIT Media Lab. % */ /* Include declarations. 
*/

#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/channel.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/list.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/shear.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C r o p   T o   F i t   I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CropToFitImage() crops the sheared image as determined by the bounding box
%  as defined by width and height and shearing angles.
%
%  The format of the CropToFitImage method is:
%
%      MagickBooleanType CropToFitImage(Image **image,
%        const double x_shear,const double y_shear,
%        const double width,const double height,
%        const MagickBooleanType rotate,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o x_shear, y_shear, width, height: Defines a region of the image to crop.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CropToFitImage(Image **image,
  const double x_shear,const double y_shear,
  const double width,const double height,
  const MagickBooleanType rotate,ExceptionInfo *exception)
{
  Image
    *crop_image;

  PointInfo
    extent[4],
    min,
    max;

  RectangleInfo
    geometry,
    page;

  register ssize_t
    i;

  /*
    Calculate the rotated image size: start from the four corners of a
    width x height box centered on the origin.
  */
  extent[0].x=(double) (-width/2.0);
  extent[0].y=(double) (-height/2.0);
  extent[1].x=(double) width/2.0;
  extent[1].y=(double) (-height/2.0);
  extent[2].x=(double) (-width/2.0);
  extent[2].y=(double) height/2.0;
  extent[3].x=(double) width/2.0;
  extent[3].y=(double) height/2.0;
  for (i=0; i < 4; i++)
  {
    /*
      Apply the X shear, then the Y shear to the already X-sheared corner;
      a rotation applies a second X shear.  The order of these updates is
      significant -- each step reads the result of the previous one.
    */
    extent[i].x+=x_shear*extent[i].y;
    extent[i].y+=y_shear*extent[i].x;
    if (rotate != MagickFalse)
      extent[i].x+=x_shear*extent[i].y;
    /*
      Translate back into image coordinates (origin at top-left).
    */
    extent[i].x+=(double) (*image)->columns/2.0;
    extent[i].y+=(double) (*image)->rows/2.0;
  }
  /*
    Bounding box of the four transformed corners.
  */
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  geometry.x=(ssize_t) ceil(min.x-0.5);
  geometry.y=(ssize_t) ceil(min.y-0.5);
  geometry.width=(size_t) floor(max.x-min.x+0.5);
  geometry.height=(size_t) floor(max.y-min.y+0.5);
  /*
    Crop with a zeroed page so CropImage() does not offset the region, then
    restore the original page on the result.  On success the input image is
    destroyed and replaced with the cropped image; on failure the input
    image is left untouched.
  */
  page=(*image)->page;
  (void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page);
  crop_image=CropImage(*image,&geometry,exception);
  if (crop_image == (Image *) NULL)
    return(MagickFalse);
  crop_image->page=page;
  *image=DestroyImage(*image);
  *image=crop_image;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s k e w   I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DeskewImage() removes skew from the image.
Skew is an artifact that % occurs in scanned images because of the camera being misaligned, % imperfections in the scanning or surface, or simply because the paper was % not placed completely flat when scanned. % % The result will be auto-croped if the artifact "deskew:auto-crop" is % defined, while the amount the image is to be deskewed, in degrees is also % saved as the artifact "deskew:angle". % % The format of the DeskewImage method is: % % Image *DeskewImage(const Image *image,const double threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: separate background from foreground. % % o exception: return any errors or warnings in this structure. % */ static void RadonProjection(const Image *image,MatrixInfo *source_matrixs, MatrixInfo *destination_matrixs,const ssize_t sign,size_t *projection) { MatrixInfo *swap; register MatrixInfo *p, *q; register ssize_t x; size_t step; p=source_matrixs; q=destination_matrixs; for (step=1; step < GetMatrixColumns(p); step*=2) { for (x=0; x < (ssize_t) GetMatrixColumns(p); x+=2*(ssize_t) step) { register ssize_t i; ssize_t y; unsigned short element, neighbor; for (i=0; i < (ssize_t) step; i++) { for (y=0; y < (ssize_t) (GetMatrixRows(p)-i-1); y++) { if (GetMatrixElement(p,x+i,y,&element) == MagickFalse) continue; if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse) continue; neighbor+=element; if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse) continue; if (GetMatrixElement(p,x+i+step,y+i+1,&neighbor) == MagickFalse) continue; neighbor+=element; if (SetMatrixElement(q,x+2*i+1,y,&neighbor) == MagickFalse) continue; } for ( ; y < (ssize_t) (GetMatrixRows(p)-i); y++) { if (GetMatrixElement(p,x+i,y,&element) == MagickFalse) continue; if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse) continue; neighbor+=element; if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse) continue; if (SetMatrixElement(q,x+2*i+1,y,&element) == 
MagickFalse) continue; } for ( ; y < (ssize_t) GetMatrixRows(p); y++) { if (GetMatrixElement(p,x+i,y,&element) == MagickFalse) continue; if (SetMatrixElement(q,x+2*i,y,&element) == MagickFalse) continue; if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse) continue; } } } swap=p; p=q; q=swap; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,GetMatrixColumns(p),1) #endif for (x=0; x < (ssize_t) GetMatrixColumns(p); x++) { register ssize_t y; size_t sum; sum=0; for (y=0; y < (ssize_t) (GetMatrixRows(p)-1); y++) { ssize_t delta; unsigned short element, neighbor; if (GetMatrixElement(p,x,y,&element) == MagickFalse) continue; if (GetMatrixElement(p,x,y+1,&neighbor) == MagickFalse) continue; delta=(ssize_t) element-(ssize_t) neighbor; sum+=delta*delta; } projection[GetMatrixColumns(p)+sign*x-1]=sum; } } static MagickBooleanType RadonTransform(const Image *image, const double threshold,size_t *projection,ExceptionInfo *exception) { CacheView *image_view; MatrixInfo *destination_matrixs, *source_matrixs; MagickBooleanType status; size_t count, width; ssize_t j, y; unsigned char c; unsigned short bits[256]; for (width=1; width < ((image->columns+7)/8); width<<=1) ; source_matrixs=AcquireMatrixInfo(width,image->rows,sizeof(unsigned short), exception); destination_matrixs=AcquireMatrixInfo(width,image->rows, sizeof(unsigned short),exception); if ((source_matrixs == (MatrixInfo *) NULL) || (destination_matrixs == (MatrixInfo *) NULL)) { if (destination_matrixs != (MatrixInfo *) NULL) destination_matrixs=DestroyMatrixInfo(destination_matrixs); if (source_matrixs != (MatrixInfo *) NULL) source_matrixs=DestroyMatrixInfo(source_matrixs); return(MagickFalse); } if (NullMatrix(source_matrixs) == MagickFalse) { destination_matrixs=DestroyMatrixInfo(destination_matrixs); source_matrixs=DestroyMatrixInfo(source_matrixs); return(MagickFalse); } for (j=0; j < 256; j++) { c=(unsigned char) j; for (count=0; c != 0; 
c>>=1) count+=c & 0x01; bits[j]=(unsigned short) count; } status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t i, x; size_t bit, byte; unsigned short value; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } bit=0; byte=0; i=(ssize_t) (image->columns+7)/8; for (x=0; x < (ssize_t) image->columns; x++) { byte<<=1; if (((MagickRealType) GetPixelRed(image,p) < threshold) || ((MagickRealType) GetPixelGreen(image,p) < threshold) || ((MagickRealType) GetPixelBlue(image,p) < threshold)) byte|=0x01; bit++; if (bit == 8) { value=bits[byte]; (void) SetMatrixElement(source_matrixs,--i,y,&value); bit=0; byte=0; } p+=GetPixelChannels(image); } if (bit != 0) { byte<<=(8-bit); value=bits[byte]; (void) SetMatrixElement(source_matrixs,--i,y,&value); } } RadonProjection(image,source_matrixs,destination_matrixs,-1,projection); (void) NullMatrix(source_matrixs); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t i, x; size_t bit, byte; unsigned short value; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } bit=0; byte=0; i=0; for (x=0; x < (ssize_t) image->columns; x++) { byte<<=1; if (((MagickRealType) GetPixelRed(image,p) < threshold) || ((MagickRealType) GetPixelGreen(image,p) < threshold) || ((MagickRealType) GetPixelBlue(image,p) < threshold)) byte|=0x01; 
bit++; if (bit == 8) { value=bits[byte]; (void) SetMatrixElement(source_matrixs,i++,y,&value); bit=0; byte=0; } p+=GetPixelChannels(image); } if (bit != 0) { byte<<=(8-bit); value=bits[byte]; (void) SetMatrixElement(source_matrixs,i++,y,&value); } } RadonProjection(image,source_matrixs,destination_matrixs,1,projection); image_view=DestroyCacheView(image_view); destination_matrixs=DestroyMatrixInfo(destination_matrixs); source_matrixs=DestroyMatrixInfo(source_matrixs); return(MagickTrue); } static void GetImageBackgroundColor(Image *image,const ssize_t offset, ExceptionInfo *exception) { CacheView *image_view; PixelInfo background; double count; ssize_t y; /* Compute average background color. */ if (offset <= 0) return; GetPixelInfo(image,&background); count=0.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; if ((y >= offset) && (y < ((ssize_t) image->rows-offset))) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) continue; for (x=0; x < (ssize_t) image->columns; x++) { if ((x >= offset) && (x < ((ssize_t) image->columns-offset))) continue; background.red+=QuantumScale*GetPixelRed(image,p); background.green+=QuantumScale*GetPixelGreen(image,p); background.blue+=QuantumScale*GetPixelBlue(image,p); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) background.alpha+=QuantumScale*GetPixelAlpha(image,p); count++; p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); image->background_color.red=(double) ClampToQuantum(QuantumRange* background.red/count); image->background_color.green=(double) ClampToQuantum(QuantumRange* background.green/count); image->background_color.blue=(double) ClampToQuantum(QuantumRange* background.blue/count); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->background_color.alpha=(double) ClampToQuantum(QuantumRange* 
background.alpha/count); } MagickExport Image *DeskewImage(const Image *image,const double threshold, ExceptionInfo *exception) { AffineMatrix affine_matrix; const char *artifact; double degrees; Image *clone_image, *crop_image, *deskew_image, *median_image; MagickBooleanType status; RectangleInfo geometry; register ssize_t i; size_t max_projection, *projection, width; ssize_t skew; /* Compute deskew angle. */ for (width=1; width < ((image->columns+7)/8); width<<=1) ; projection=(size_t *) AcquireQuantumMemory((size_t) (2*width-1), sizeof(*projection)); if (projection == (size_t *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); status=RadonTransform(image,threshold,projection,exception); if (status == MagickFalse) { projection=(size_t *) RelinquishMagickMemory(projection); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } max_projection=0; skew=0; for (i=0; i < (ssize_t) (2*width-1); i++) { if (projection[i] > max_projection) { skew=i-(ssize_t) width+1; max_projection=projection[i]; } } projection=(size_t *) RelinquishMagickMemory(projection); degrees=RadiansToDegrees(-atan((double) skew/width/8)); if (image->debug != MagickFalse) (void) LogMagickEvent(TransformEvent,GetMagickModule(), " Deskew angle: %g",degrees); /* Deskew image. 
*/ clone_image=CloneImage(image,0,0,MagickTrue,exception); if (clone_image == (Image *) NULL) return((Image *) NULL); { char angle[MagickPathExtent]; (void) FormatLocaleString(angle,MagickPathExtent,"%.20g",degrees); (void) SetImageArtifact(clone_image,"deskew:angle",angle); } (void) SetImageVirtualPixelMethod(clone_image,BackgroundVirtualPixelMethod, exception); affine_matrix.sx=cos(DegreesToRadians(fmod((double) degrees,360.0))); affine_matrix.rx=sin(DegreesToRadians(fmod((double) degrees,360.0))); affine_matrix.ry=(-sin(DegreesToRadians(fmod((double) degrees,360.0)))); affine_matrix.sy=cos(DegreesToRadians(fmod((double) degrees,360.0))); affine_matrix.tx=0.0; affine_matrix.ty=0.0; artifact=GetImageArtifact(image,"deskew:auto-crop"); if (IsStringTrue(artifact) == MagickFalse) { deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception); clone_image=DestroyImage(clone_image); return(deskew_image); } /* Auto-crop image. */ GetImageBackgroundColor(clone_image,(ssize_t) StringToLong(artifact), exception); deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception); clone_image=DestroyImage(clone_image); if (deskew_image == (Image *) NULL) return((Image *) NULL); median_image=StatisticImage(deskew_image,MedianStatistic,3,3,exception); if (median_image == (Image *) NULL) { deskew_image=DestroyImage(deskew_image); return((Image *) NULL); } geometry=GetImageBoundingBox(median_image,exception); median_image=DestroyImage(median_image); if (image->debug != MagickFalse) (void) LogMagickEvent(TransformEvent,GetMagickModule()," Deskew geometry: " "%.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double) geometry.height,(double) geometry.x,(double) geometry.y); crop_image=CropImage(deskew_image,&geometry,exception); deskew_image=DestroyImage(deskew_image); return(crop_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n t e g r a l R o t a t e I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IntegralRotateImage() rotates the image an integral of 90 degrees. It % allocates the memory necessary for the new Image structure and returns a % pointer to the rotated image. % % The format of the IntegralRotateImage method is: % % Image *IntegralRotateImage(const Image *image,size_t rotations, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o rotations: Specifies the number of 90 degree rotations. % */ MagickExport Image *IntegralRotateImage(const Image *image,size_t rotations, ExceptionInfo *exception) { #define RotateImageTag "Rotate/Image" CacheView *image_view, *rotate_view; Image *rotate_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; /* Initialize rotated image attributes. */ assert(image != (Image *) NULL); page=image->page; rotations%=4; switch (rotations) { case 0: default: { rotate_image=CloneImage(image,0,0,MagickTrue,exception); break; } case 2: { rotate_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); break; } case 1: case 3: { rotate_image=CloneImage(image,image->rows,image->columns,MagickTrue, exception); break; } } if (rotate_image == (Image *) NULL) return((Image *) NULL); if (rotations == 0) return(rotate_image); /* Integral rotate the image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); rotate_view=AcquireAuthenticCacheView(rotate_image,exception); switch (rotations) { case 1: { size_t tile_height, tile_width; ssize_t tile_y; /* Rotate 90 degrees. 
*/ GetPixelCacheTileSize(image,&tile_width,&tile_height); tile_width=image->columns; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,rotate_image,image->rows/tile_height,1) #endif for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height) { register ssize_t tile_x; if (status == MagickFalse) continue; tile_x=0; for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width) { MagickBooleanType sync; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t y; size_t height, width; width=tile_width; if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns) width=(size_t) (tile_width-(tile_x+tile_width-image->columns)); height=tile_height; if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows) height=(size_t) (tile_height-(tile_y+tile_height-image->rows)); p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (y=0; y < (ssize_t) width; y++) { register const Quantum *magick_restrict tile_pixels; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(rotate_view,(ssize_t) (rotate_image->columns-(tile_y+height)),y+tile_x,height,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } tile_pixels=p+((height-1)*width+y)*GetPixelChannels(image); for (x=0; x < (ssize_t) height; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image, channel); if ((traits == UndefinedPixelTrait) || (rotate_traits == UndefinedPixelTrait)) continue; SetPixelChannel(rotate_image,channel,tile_pixels[i],q); } tile_pixels-=width*GetPixelChannels(image); q+=GetPixelChannels(rotate_image); } 
sync=SyncCacheViewAuthenticPixels(rotate_view,exception); if (sync == MagickFalse) status=MagickFalse; } } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType) image->rows-1,image->rows); Swap(page.width,page.height); Swap(page.x,page.y); if (page.width != 0) page.x=(ssize_t) (page.width-rotate_image->columns-page.x); break; } case 2: { register ssize_t y; /* Rotate 180 degrees. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,rotate_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(rotate_view,0,(ssize_t) (image->rows-y- 1),image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } q+=GetPixelChannels(rotate_image)*image->columns; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; q-=GetPixelChannels(rotate_image); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image, channel); if ((traits == UndefinedPixelTrait) || (rotate_traits == UndefinedPixelTrait)) continue; SetPixelChannel(rotate_image,channel,p[i],q); } p+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(rotate_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { 
MagickBooleanType proceed; proceed=SetImageProgress(image,RotateImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType) image->rows-1,image->rows); if (page.width != 0) page.x=(ssize_t) (page.width-rotate_image->columns-page.x); if (page.height != 0) page.y=(ssize_t) (page.height-rotate_image->rows-page.y); break; } case 3: { size_t tile_height, tile_width; ssize_t tile_y; /* Rotate 270 degrees. */ GetPixelCacheTileSize(image,&tile_width,&tile_height); tile_width=image->columns; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,rotate_image,image->rows/tile_height,1) #endif for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height) { register ssize_t tile_x; if (status == MagickFalse) continue; tile_x=0; for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width) { MagickBooleanType sync; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t y; size_t height, width; width=tile_width; if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns) width=(size_t) (tile_width-(tile_x+tile_width-image->columns)); height=tile_height; if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows) height=(size_t) (tile_height-(tile_y+tile_height-image->rows)); p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (y=0; y < (ssize_t) width; y++) { register const Quantum *magick_restrict tile_pixels; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(rotate_view,tile_y,(ssize_t) (y+ rotate_image->rows-(tile_x+width)),height,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } tile_pixels=p+((width-1)-y)*GetPixelChannels(image); for (x=0; x < (ssize_t) height; x++) { register ssize_t i; 
for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image, channel); if ((traits == UndefinedPixelTrait) || (rotate_traits == UndefinedPixelTrait)) continue; SetPixelChannel(rotate_image,channel,tile_pixels[i],q); } tile_pixels+=width*GetPixelChannels(image); q+=GetPixelChannels(rotate_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_IntegralRotateImage) #endif sync=SyncCacheViewAuthenticPixels(rotate_view,exception); if (sync == MagickFalse) status=MagickFalse; } } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType) image->rows-1,image->rows); Swap(page.width,page.height); Swap(page.x,page.y); if (page.height != 0) page.y=(ssize_t) (page.height-rotate_image->rows-page.y); break; } default: break; } rotate_view=DestroyCacheView(rotate_view); image_view=DestroyCacheView(image_view); rotate_image->type=image->type; rotate_image->page=page; if (status == MagickFalse) rotate_image=DestroyImage(rotate_image); return(rotate_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + X S h e a r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % XShearImage() shears the image in the X direction with a shear angle of % 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and % negative angles shear clockwise. Angles are measured relative to a vertical % Y-axis. X shears will widen an image creating 'empty' triangles on the left % and right sides of the source image. 
%
%  The format of the XShearImage method is:
%
%      MagickBooleanType XShearImage(Image *image,const double degrees,
%        const size_t width,const size_t height,
%        const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o degrees: A double representing the shearing angle along the X
%      axis.
%
%    o width, height, x_offset, y_offset: Defines a region of the image
%      to shear.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType XShearImage(Image *image,const double degrees,
  const size_t width,const size_t height,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define XShearImageTag  "XShear/Image"

  typedef enum
  {
    LEFT,
    RIGHT
  } ShearDirection;

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    background;

  ssize_t
    y;

  /*
    X shear image: shift each row horizontally, in place, by an amount that
    grows linearly with the row's distance from the vertical center (Paeth
    shear).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  background=image->background_color;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,height,1)
#endif
  for (y=0; y < (ssize_t) height; y++)
  {
    PixelInfo
      pixel,
      source,
      destination;

    double
      area,
      displacement;

    register Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i;

    ShearDirection
      direction;

    ssize_t
      step;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewAuthenticPixels(image_view,0,y_offset+y,image->columns,1,
      exception);
    if (p == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    p+=x_offset*GetPixelChannels(image);
    /*
      The row displacement is proportional to the distance from the
      vertical center of the region; its sign selects the direction.
    */
    displacement=degrees*(double) (y-height/2.0);
    if (displacement == 0.0)
      continue;
    if (displacement > 0.0)
      direction=RIGHT;
    else
      {
        displacement*=(-1.0);
        direction=LEFT;
      }
    /*
      Split the displacement into a whole-pixel step and a fractional
      area used to area-blend adjacent pixels (anti-aliasing).
    */
    step=(ssize_t) floor((double) displacement);
    area=(double) (displacement-step);
    step++;
    pixel=background;
    GetPixelInfo(image,&source);
    GetPixelInfo(image,&destination);
    switch (direction)
    {
      case LEFT:
      {
        /*
          Transfer pixels left-to-right.
        */
        if (step > x_offset)
          break;
        q=p-step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) width; i++)
        {
          if ((x_offset+i) < step)
            {
              /*
                Destination would fall left of the region; just advance.
              */
              p+=GetPixelChannels(image);
              GetPixelInfoPixel(image,p,&pixel);
              q+=GetPixelChannels(image);
              continue;
            }
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,&destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(image);
        }
        /*
          Blend the trailing edge with the background color, then fill the
          vacated pixels with background.
        */
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        SetPixelViaPixelInfo(image,&destination,q);
        q+=GetPixelChannels(image);
        for (i=0; i < (step-1); i++)
        {
          SetPixelViaPixelInfo(image,&background,q);
          q+=GetPixelChannels(image);
        }
        break;
      }
      case RIGHT:
      {
        /*
          Transfer pixels right-to-left.
        */
        p+=width*GetPixelChannels(image);
        q=p+step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) width; i++)
        {
          p-=GetPixelChannels(image);
          q-=GetPixelChannels(image);
          if ((size_t) (x_offset+width+step-i) > image->columns)
            continue;
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,&destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
        }
        /*
          Blend the leading edge with the background color, then fill the
          vacated pixels with background.
        */
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        q-=GetPixelChannels(image);
        SetPixelViaPixelInfo(image,&destination,q);
        for (i=0; i < (step-1); i++)
        {
          q-=GetPixelChannels(image);
          SetPixelViaPixelInfo(image,&background,q);
        }
        break;
      }
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,XShearImageTag,progress,height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Y S h e a r   I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  YShearImage shears the image in the Y direction with a shear angle of
%  'degrees'.  Positive angles shear counter-clockwise (right-hand rule), and
%  negative angles shear clockwise.  Angles are measured relative to a
%  horizontal X-axis.  Y shears will increase the height of an image creating
%  'empty' triangles on the top and bottom of the source image.
%
%  The format of the YShearImage method is:
%
%      MagickBooleanType YShearImage(Image *image,const double degrees,
%        const size_t width,const size_t height,
%        const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o degrees: A double representing the shearing angle along the Y
%      axis.
%
%    o width, height, x_offset, y_offset: Defines a region of the image
%      to shear.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType YShearImage(Image *image,const double degrees,
  const size_t width,const size_t height,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define YShearImageTag  "YShear/Image"

  typedef enum
  {
    UP,
    DOWN
  } ShearDirection;

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    background;

  ssize_t
    x;

  /*
    Y Shear image: each column of the region is shifted vertically by a
    displacement proportional to its distance from the region's center
    column; fractional displacement is anti-aliased by area blending.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  progress=0;
  background=image->background_color;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,width,1)
#endif
  for (x=0; x < (ssize_t) width; x++)
  {
    double
      area,
      displacement;

    PixelInfo
      pixel,
      source,
      destination;

    register Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i;

    ShearDirection
      direction;

    ssize_t
      step;

    if (status == MagickFalse)
      continue;
    /*
      Each iteration owns one full column of authentic pixels.
    */
    p=GetCacheViewAuthenticPixels(image_view,x_offset+x,0,1,image->rows,
      exception);
    if (p == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    p+=y_offset*GetPixelChannels(image);
    displacement=degrees*(double) (x-width/2.0);
    if (displacement == 0.0)
      continue;  /* center column: no shift required */
    if (displacement > 0.0)
      direction=DOWN;
    else
      {
        displacement*=(-1.0);
        direction=UP;
      }
    /*
      Split the displacement into a whole-pixel step and a fractional
      area used to blend adjacent pixels.
    */
    step=(ssize_t) floor((double) displacement);
    area=(double) (displacement-step);
    step++;
    pixel=background;
    GetPixelInfo(image,&source);
    GetPixelInfo(image,&destination);
    switch (direction)
    {
      case UP:
      {
        /*
          Transfer pixels top-to-bottom.
        */
        if (step > y_offset)
          break;
        q=p-step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) height; i++)
        {
          if ((y_offset+i) < step)
            {
              /* destination would fall above the region: skip, but keep
                 the running blend pixel up to date */
              p+=GetPixelChannels(image);
              GetPixelInfoPixel(image,p,&pixel);
              q+=GetPixelChannels(image);
              continue;
            }
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,
            &destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(image);
        }
        /* blend the trailing edge against the background, then fill the
           vacated pixels with the background color */
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        SetPixelViaPixelInfo(image,&destination,q);
        q+=GetPixelChannels(image);
        for (i=0; i < (step-1); i++)
        {
          SetPixelViaPixelInfo(image,&background,q);
          q+=GetPixelChannels(image);
        }
        break;
      }
      case DOWN:
      {
        /*
          Transfer pixels bottom-to-top.
        */
        p+=height*GetPixelChannels(image);
        q=p+step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) height; i++)
        {
          p-=GetPixelChannels(image);
          q-=GetPixelChannels(image);
          if ((size_t) (y_offset+height+step-i) > image->rows)
            continue;  /* destination falls below the image: skip */
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,
            &destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
        }
        /* blend the trailing edge against the background, then fill the
           vacated pixels with the background color */
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        q-=GetPixelChannels(image);
        SetPixelViaPixelInfo(image,&destination,q);
        for (i=0; i < (step-1); i++)
        {
          q-=GetPixelChannels(image);
          SetPixelViaPixelInfo(image,&background,q);
        }
        break;
      }
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,YShearImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S h e a r I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShearImage() creates a new image that is a shear_image copy of an existing
%  one.  Shearing slides one edge of an image along the X or Y axis, creating
%  a parallelogram.  An X direction shear slides an edge along the X axis,
%  while a Y direction shear slides an edge along the Y axis.  The amount of
%  the shear is controlled by a shear angle.  For X direction shears, x_shear
%  is measured relative to the Y axis, and similarly, for Y direction shears
%  y_shear is measured relative to the X axis.
%  Empty triangles left over from
%  shearing the image are filled with the background color defined by member
%  'background_color' of the image.  ShearImage() allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
%
%  ShearImage() is based on the paper "A Fast Algorithm for General Raster
%  Rotation" by Alan W. Paeth.
%
%  The format of the ShearImage method is:
%
%      Image *ShearImage(const Image *image,const double x_shear,
%        const double y_shear,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o x_shear, y_shear: Specifies the number of degrees to shear the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShearImage(const Image *image,const double x_shear,
  const double y_shear,ExceptionInfo *exception)
{
  Image
    *integral_image,
    *shear_image;

  MagickBooleanType
    status;

  PointInfo
    shear;

  RectangleInfo
    border_info,
    bounds;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Shear angles that are exact multiples of 90 degrees are rejected: the
    tangent is discontinuous there.
  */
  if ((x_shear != 0.0) && (fmod(x_shear,90.0) == 0.0))
    ThrowImageException(ImageError,"AngleIsDiscontinuous");
  if ((y_shear != 0.0) && (fmod(y_shear,90.0) == 0.0))
    ThrowImageException(ImageError,"AngleIsDiscontinuous");
  /*
    Initialize shear angle.
  */
  integral_image=CloneImage(image,0,0,MagickTrue,exception);
  if (integral_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  shear.x=(-tan(DegreesToRadians(fmod(x_shear,360.0))));
  shear.y=tan(DegreesToRadians(fmod(y_shear,360.0)));
  if ((shear.x == 0.0) && (shear.y == 0.0))
    return(integral_image);  /* no-op shear: return the clone unchanged */
  if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse)
    {
      integral_image=DestroyImage(integral_image);
      return(integral_image);
    }
  if (integral_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel,exception);
  /*
    Compute image size.
  */
  bounds.width=image->columns+(ssize_t) floor(fabs(shear.x)*image->rows+0.5);
  bounds.x=(ssize_t) ceil((double) image->columns+((fabs(shear.x)*image->rows)-
    image->columns)/2.0-0.5);
  bounds.y=(ssize_t) ceil((double) image->rows+((fabs(shear.y)*bounds.width)-
    image->rows)/2.0-0.5);
  /*
    Surround image with border.
  */
  integral_image->border_color=integral_image->background_color;
  integral_image->compose=CopyCompositeOp;
  border_info.width=(size_t) bounds.x;
  border_info.height=(size_t) bounds.y;
  shear_image=BorderImage(integral_image,&border_info,image->compose,exception);
  integral_image=DestroyImage(integral_image);
  if (shear_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Shear the image: X shear, then Y shear, then crop to fit.
  */
  if (shear_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(shear_image,OpaqueAlphaChannel,exception);
  status=XShearImage(shear_image,shear.x,image->columns,image->rows,bounds.x,
    (ssize_t) (shear_image->rows-image->rows)/2,exception);
  if (status == MagickFalse)
    {
      shear_image=DestroyImage(shear_image);
      return((Image *) NULL);
    }
  status=YShearImage(shear_image,shear.y,bounds.width,image->rows,(ssize_t)
    (shear_image->columns-bounds.width)/2,bounds.y,exception);
  if (status == MagickFalse)
    {
      shear_image=DestroyImage(shear_image);
      return((Image *) NULL);
    }
  status=CropToFitImage(&shear_image,shear.x,shear.y,(MagickRealType)
    image->columns,(MagickRealType) image->rows,MagickFalse,exception);
  shear_image->alpha_trait=image->alpha_trait;
  shear_image->compose=image->compose;
  shear_image->page.width=0;
  shear_image->page.height=0;
  if (status == MagickFalse)
    shear_image=DestroyImage(shear_image);
  return(shear_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S h e a r R o t a t e I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShearRotateImage() creates a new image that is a rotated copy of an
%  existing one.  Positive angles rotate counter-clockwise (right-hand rule),
%  while negative angles rotate clockwise.  Rotated images are usually larger
%  than the originals and have 'empty' triangular corners.  Empty triangles
%  left over from shearing the image are filled with the background color
%  defined by member 'background_color' of the image.  ShearRotateImage
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  ShearRotateImage() is based on the paper "A Fast Algorithm for General
%  Raster Rotation" by Alan W. Paeth.  ShearRotateImage is adapted from a
%  similar method based on the Paeth paper written by Michael Halle of the
%  Spatial Imaging Group, MIT Media Lab.
%
%  The format of the ShearRotateImage method is:
%
%      Image *ShearRotateImage(const Image *image,const double degrees,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o degrees: Specifies the number of degrees to rotate the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShearRotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  Image
    *integral_image,
    *rotate_image;

  MagickBooleanType
    status;

  MagickRealType
    angle;

  PointInfo
    shear;

  RectangleInfo
    border_info,
    bounds;

  size_t
    height,
    rotations,
    shear_width,
    width;

  /*
    Adjust rotation angle: reduce to the range (-45, 45] plus a number of
    quadrant (90 degree) rotations that are performed losslessly.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  angle=fmod(degrees,360.0);
  if (angle < -45.0)
    angle+=360.0;
  for (rotations=0; angle > 45.0; rotations++)
    angle-=90.0;
  rotations%=4;
  /*
    Calculate shear equations.
  */
  integral_image=IntegralRotateImage(image,rotations,exception);
  if (integral_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
  shear.y=sin((double) DegreesToRadians(angle));
  if ((shear.x == 0.0) && (shear.y == 0.0))
    return(integral_image);  /* rotation was a pure multiple of 90 degrees */
  if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse)
    {
      integral_image=DestroyImage(integral_image);
      return(integral_image);
    }
  if (integral_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel,exception);
  /*
    Compute maximum bounds for 3 shear operations.
  */
  width=integral_image->columns;
  height=integral_image->rows;
  bounds.width=(size_t) floor(fabs((double) height*shear.x)+width+0.5);
  bounds.height=(size_t) floor(fabs((double) bounds.width*shear.y)+height+0.5);
  shear_width=(size_t) floor(fabs((double) bounds.height*shear.x)+
    bounds.width+0.5);
  bounds.x=(ssize_t) floor((double) ((shear_width > bounds.width) ? width :
    bounds.width-shear_width+2)/2.0+0.5);
  bounds.y=(ssize_t) floor(((double) bounds.height-height+2)/2.0+0.5);
  /*
    Surround image with a border.
  */
  integral_image->border_color=integral_image->background_color;
  integral_image->compose=CopyCompositeOp;
  border_info.width=(size_t) bounds.x;
  border_info.height=(size_t) bounds.y;
  rotate_image=BorderImage(integral_image,&border_info,image->compose,
    exception);
  integral_image=DestroyImage(integral_image);
  if (rotate_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Rotate the image: X shear, Y shear, then X shear again (Paeth's
    three-shear rotation).
  */
  status=XShearImage(rotate_image,shear.x,width,height,bounds.x,(ssize_t)
    (rotate_image->rows-height)/2,exception);
  if (status == MagickFalse)
    {
      rotate_image=DestroyImage(rotate_image);
      return((Image *) NULL);
    }
  status=YShearImage(rotate_image,shear.y,bounds.width,height,(ssize_t)
    (rotate_image->columns-bounds.width)/2,bounds.y,exception);
  if (status == MagickFalse)
    {
      rotate_image=DestroyImage(rotate_image);
      return((Image *) NULL);
    }
  status=XShearImage(rotate_image,shear.x,bounds.width,bounds.height,(ssize_t)
    (rotate_image->columns-bounds.width)/2,(ssize_t) (rotate_image->rows-
    bounds.height)/2,exception);
  if (status == MagickFalse)
    {
      rotate_image=DestroyImage(rotate_image);
      return((Image *) NULL);
    }
  status=CropToFitImage(&rotate_image,shear.x,shear.y,(MagickRealType) width,
    (MagickRealType) height,MagickTrue,exception);
  rotate_image->alpha_trait=image->alpha_trait;
  rotate_image->compose=image->compose;
  rotate_image->page.width=0;
  rotate_image->page.height=0;
  if (status == MagickFalse)
    rotate_image=DestroyImage(rotate_image);
  return(rotate_image);
}
comm.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/**
 * Copyright (c) 2015 by Contributors
 */
#ifndef MXNET_KVSTORE_COMM_H_
#define MXNET_KVSTORE_COMM_H_
#include <dmlc/omp.h>
#include <string>
#include <algorithm>
#include <utility>
#include <limits>
#include <vector>
#include <tuple>
#include <thread>
#include "mxnet/ndarray.h"
#include "gradient_compression.h"
#include "../ndarray/ndarray_function.h"
#include "../operator/tensor/sparse_retain-inl.h"
namespace mxnet {
namespace kvstore {
/**
 * \brief multiple device communication
 */
class Comm {
 public:
  Comm() {
    // pinned (page-locked) host memory: prevents the buffer from being
    // swapped out
    pinned_ctx_ = Context::CPUPinned(0);
  }
  virtual ~Comm() { }
  /**
   * \brief init key with the data shape and storage shape
   */
  virtual void Init(int key, const NDArrayStorageType stype,
                    const TShape& shape, int dtype = mshadow::kFloat32) = 0;
  /**
   * \brief returns src[0] + .. + src[src.size()-1]
   */
  virtual const NDArray& Reduce(
      int key, const std::vector<NDArray>& src, int priority) = 0;
  /**
   * \brief copy from src to dst[i] for every i
   */
  virtual void Broadcast(
      int key, const NDArray& src,
      const std::vector<NDArray*> dst, int priority) = 0;
  /**
   * \brief broadcast src to dst[i] with target row_ids for every i
   * \param dst a list of destination row_sparse NDArray and its target row_ids
   *        to broadcast, where the row_ids are expected to be unique and sorted
   * \param use_copy if set to true, directly copy src to dst[i] without looking
   *        up the provided row_ids
   */
  virtual void BroadcastRowSparse(int key, const NDArray& src,
                                  const std::vector<std::pair<NDArray*, NDArray>>& dst,
                                  const bool use_copy,
                                  const int priority) = 0;
  /**
   * \brief return a pinned context
   */
  Context pinned_ctx() const {
    return pinned_ctx_;
  }
  /**
   * \brief Sets gradient compression parameters to be able to
   * perform reduce with compressed gradients
   */
  void SetGradientCompression(std::shared_ptr<GradientCompression> gc) {
    gc_ = gc;
  }

 protected:
  // pinned-CPU context used for all staging buffers
  Context pinned_ctx_;
  // optional gradient compressor; null or kNone disables compression
  std::shared_ptr<GradientCompression> gc_;
};

/**
 * \brief an implementation of Comm that first copies data to CPU memory, and
 * then reduces there
 */
class CommCPU : public Comm {
 public:
  CommCPU() {
    nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4);
    bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000);
    // TODO(junwu) delete the following data member, now for benchmark only
    is_serial_push_ = dmlc::GetEnv("MXNET_KVSTORE_SERIAL_PUSH", 0);
  }
  virtual ~CommCPU() { }

  // Allocate the per-key merge buffer on pinned CPU memory; dense keys get a
  // dense buffer, everything else a deferred-alloc sparse buffer.
  void Init(int key, const NDArrayStorageType stype, const TShape& shape,
            int type = mshadow::kFloat32) override {
    if (stype == kDefaultStorage) {
      merge_buf_[key].merged = NDArray(shape, pinned_ctx_, false, type);
    } else {
      merge_buf_[key].merged = NDArray(stype, shape, pinned_ctx_, true, type);
    }
  }

  const NDArray& Reduce(int key, const std::vector<NDArray>& src,
                        int priority) override {
    auto& buf = merge_buf_[key];
    // avoid extra copy for single device, but it may bring problems for
    // abnormal usage of kvstore
    if (src.size() == 1) {
      if (src[0].storage_type() == kDefaultStorage) {
        return src[0];
      } else {  // if sparse and only one GPU, always update weight on CPU
        CopyFromTo(src[0], &buf.merged, priority);
        return buf.merged;
      }
    }

    if (buf.merged.storage_type() == kDefaultStorage) {
      std::vector<Engine::VarHandle> const_vars(src.size() - 1);
      std::vector<NDArray> reduce(src.size());
      CopyFromTo(src[0], &buf.merged, priority);
      reduce[0] = buf.merged;

      if (buf.copy_buf.empty()) {
        buf.copy_buf.resize(src.size()-1);
        for (size_t j = 0; j < src.size() - 1; ++j) {
          // allocate NDArray based on storage type
          buf.copy_buf[j] = NDArray(
            src[0].shape(), pinned_ctx_, false, src[0].dtype());
        }
      }
      // stage every source (except the first, already in merged) into the
      // pinned CPU copy buffers, then sum asynchronously on the engine
      for (size_t i = 1; i < src.size(); ++i) {
        CopyFromTo(src[i], &(buf.copy_buf[i-1]), priority);
        reduce[i] = buf.copy_buf[i-1];
        const_vars[i-1] = reduce[i].var();
      }

      Engine::Get()->PushAsync(
        [reduce, this](RunContext rctx, Engine::CallbackOnComplete on_complete) {
          ReduceSumCPU(reduce);
          on_complete();
        }, Context::CPU(), const_vars, {reduce[0].var()},
        FnProperty::kCPUPrioritized, priority, PROFILER_MESSAGE("KVStoreReduce"));
    } else {
      // buf.merged is a sparse ndarray.
      std::vector<Engine::VarHandle> const_vars(src.size());
      std::vector<NDArray> reduce(src.size());

      if (buf.copy_buf.empty()) {
        buf.copy_buf.resize(src.size());
        for (size_t j = 0; j < src.size(); ++j) {
          buf.copy_buf[j] = NDArray(
            src[0].storage_type(), src[0].shape(), pinned_ctx_, true, src[0].dtype());
        }
      }
      for (size_t i = 0; i < src.size(); ++i) {
        CopyFromTo(src[i], &(buf.copy_buf[i]), priority);
        reduce[i] = buf.copy_buf[i];
        const_vars[i] = reduce[i].var();
      }
      auto result = buf.merged;
      Engine::Get()->PushAsync(
        [reduce, result, this](RunContext rctx, Engine::CallbackOnComplete on_complete) {
          NDArray out = result;
          Resource rsc = ResourceManager::Get()->Request(rctx.ctx,
            ResourceRequest(ResourceRequest::kTempSpace));
          // is_serial_push_ is a benchmarking switch; the parallel path uses
          // the generic element-wise sum kernel
          is_serial_push_?
            ReduceSumCPUExSerial(reduce, &out)
            : mxnet::ndarray::ElementwiseSum(rctx.get_stream<cpu>(), rsc, reduce, &out);
          on_complete();
        }, Context::CPU(), const_vars, {result.var()},
        FnProperty::kCPUPrioritized, priority, PROFILER_MESSAGE("KVStoreReduce"));
    }

    return buf.merged;
  }

  void Broadcast(int key, const NDArray& src,
                 const std::vector<NDArray*> dst, int priority) override {
    int mask = src.ctx().dev_mask();
    if (mask == Context::kCPU) {
      for (auto d : dst) CopyFromTo(src, d, priority);
    } else {
      // first copy data to cpu, then broadcast
      auto& buf = merge_buf_[key];
      CopyFromTo(src, &buf.merged, priority);
      for (auto d : dst) CopyFromTo(buf.merged, d, priority);
    }
  }

  void BroadcastRowSparse(int key, const NDArray& src,
                          const std::vector<std::pair<NDArray*, NDArray>>& dst,
                          const bool use_copy,
                          const int priority) override {
    using namespace mshadow;
    CHECK_EQ(src.storage_type(), kRowSparseStorage)
      << "BroadcastRowSparse expects row-sparse src NDArray";
    CHECK_EQ(src.ctx().dev_mask(), Context::kCPU)
      << "BroadcastRowSparse with src on gpu context not supported";
    for (size_t i = 0; i < dst.size(); ++i) {
      NDArray* out = dst[i].first;
      NDArray row_id = dst[i].second;
      if (use_copy) {
        CopyFromTo(src, out, priority);
      } else {
        CHECK_EQ(out->storage_type(), kRowSparseStorage)
                 << "BroadcastRowSparse expects row_sparse dst NDArray";
        CHECK_EQ(row_id.ctx().dev_mask(), Context::kCPU)
                 << "BroadcastRowSparse with row_indices on gpu context not supported";
        // retain according to unique indices
        const bool use_sparse_retain = (src.shape()[0] != src.storage_shape()[0])
            || (row_id.dtype() != out->aux_type(rowsparse::kIdx))
            || (out->ctx().dev_mask() != Context::kGPU);
        if (use_sparse_retain) {  // use sparse_retain op
          const bool is_to_gpu = out->ctx().dev_mask() == Context::kGPU;
          // GPU destinations get a CPU staging array; CPU destinations are
          // written in place
          NDArray out_cpu = is_to_gpu? NDArray(kRowSparseStorage, src.shape(),
              src.ctx(), true, src.dtype(), src.aux_types()) : *out;
          Engine::Get()->PushAsync(
            [=](RunContext rctx, Engine::CallbackOnComplete on_complete) {
              const TBlob& indices = row_id.data();
              NDArray temp = out_cpu;  // get rid of const qualifier
              op::SparseRetainOpForwardRspImpl<cpu>(rctx.get_stream<cpu>(),
                                                    src, indices, kWriteTo,
                                                    &temp);
              on_complete();
            }, Context::CPU(), {src.var(), row_id.var()}, {out_cpu.var()},
            FnProperty::kNormal, priority, PROFILER_MESSAGE("KVStoreSparseRetain"));
          if (is_to_gpu) {
            CopyFromTo(out_cpu, out, priority);
          }
        } else {  // direct copy rows
          Engine::Get()->PushAsync(
            [=](RunContext rctx, Engine::CallbackOnComplete on_complete) {
              CopyRetainedRowsToGPU(rctx.get_stream<cpu>(), rctx.get_stream<gpu>(),
                                    src, row_id, out);
              on_complete();
            }, out->ctx(), {src.var(), row_id.var()}, {out->var()},
            FnProperty::kCopyToGPU, priority, PROFILER_MESSAGE("KVStoreCopyRetainedRowsToGPU"));
        }
      }
    }
  }

 private:
  /*!
   * \brief When src is a rsp with full rows,
   * simply copy retained rows directly from cpu to gpu
   * without invoking sparse_retain op.
   */
  void CopyRetainedRowsToGPU(mshadow::Stream<cpu>* cpu_stream,
                             mshadow::Stream<gpu>* gpu_stream,
                             const NDArray& src,
                             const NDArray& indices,
                             NDArray* dst) {
#if MXNET_USE_CUDA == 1
    CHECK_EQ(src.storage_type(), kRowSparseStorage)
      << "CopyRetainedRowsToGPU expects row-sparse src NDArray";
    CHECK_EQ(src.ctx().dev_mask(), Context::kCPU)
      << "CopyRetainedRowsToGPU with src on gpu context not supported";
    CHECK_EQ(src.storage_shape()[0], src.shape()[0])
      << "CopyRetainedRowsToGPU only supports src rsp with full rows";
    CHECK_EQ(indices.storage_type(), kDefaultStorage);
    CHECK_EQ(indices.ctx().dev_mask(), Context::kCPU);
    CHECK_EQ(dst->storage_type(), kRowSparseStorage);
    CHECK_EQ(dst->ctx().dev_mask(), Context::kGPU);
    CHECK_EQ(indices.dtype(), dst->aux_type(rowsparse::kIdx))
      << "CopyRetainedRowsToGPU only supports same data type for idx array and dst aux_data(0)";
    if (!src.storage_initialized() || indices.data().Size() == 0U) {
      op::FillZerosRspImpl(gpu_stream, *dst);
      return;
    }
    using namespace mshadow;

    const TBlob& src_data = src.data();
    const TBlob& idx_data = indices.data();
    const size_t row_length = src.shape().ProdShape(1, src.shape().ndim());
    const size_t num_rows_retained = idx_data.Size();
    dst->CheckAndAlloc({Shape1(num_rows_retained)});
    TBlob dst_data = dst->data();
    TBlob dst_idx_data = dst->aux_data(rowsparse::kIdx);
    MSHADOW_TYPE_SWITCH(src.dtype(), DType, {
      MSHADOW_IDX_TYPE_SWITCH(indices.dtype(), IType, {
        // copy idx array
        Tensor<gpu, 1, IType> dst_idx_tensor = dst_idx_data.FlatTo1D<gpu, IType>(gpu_stream);
        const Tensor<cpu, 1, IType> idx_tensor = idx_data.FlatTo1D<cpu, IType>(cpu_stream);
        Copy(dst_idx_tensor, idx_tensor, gpu_stream);
        // copy src data
        const Tensor<cpu, 2, DType> src_data_tensor = src_data.get_with_shape<cpu, 2, DType>(
            Shape2(src_data.shape_[0], row_length), cpu_stream);
        Tensor<gpu, 2, DType> dst_data_tensor = dst_data.get_with_shape<gpu, 2, DType>(
            Shape2(dst_data.shape_[0], row_length), gpu_stream);
        for (size_t i = 0; i < num_rows_retained; ++i) {
          Copy(dst_data_tensor[i], src_data_tensor[idx_tensor[i]], gpu_stream);
        }
      })
    })
#else
    LOG(FATAL) << "GPU not enabled";
#endif
  }

  // reduce sum into val[0]
  inline void ReduceSumCPU(const std::vector<NDArray> &in_data) {
    MSHADOW_TYPE_SWITCH(in_data[0].dtype(), DType, {
      std::vector<DType*> dptr(in_data.size());
      for (size_t i = 0; i < in_data.size(); ++i) {
        TBlob data = in_data[i].data();
        CHECK(data.CheckContiguous());
        dptr[i] = data.FlatTo2D<cpu, DType>().dptr_;
      }
      size_t total = in_data[0].shape().Size();
      ReduceSumCPUImpl(dptr, total);
    });
  }

  // serial implementation of reduce sum for row sparse NDArray.
  inline void ReduceSumCPUExSerial(const std::vector<NDArray> &in, NDArray *out) {
    using namespace rowsparse;
    using namespace mshadow;
    auto stype = out->storage_type();
    CHECK_EQ(stype, kRowSparseStorage) << "Unexpected storage type " << stype;
    size_t total_num_rows = 0;
    size_t num_in = in.size();
    // skip the ones with empty indices and values
    std::vector<bool> skip(num_in, false);
    // the values tensor of the inputs
    MSHADOW_TYPE_SWITCH(out->dtype(), DType, {
      MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, {
        std::vector<Tensor<cpu, 2, DType>> in_vals(num_in);
        std::vector<Tensor<cpu, 1, IType>> in_indices(num_in);
        // offset to the values tensor of all inputs
        std::vector<size_t> offsets(num_in, 0);
        std::vector<size_t> num_rows(num_in, 0);
        for (size_t i = 0; i < num_in; i++) {
          if (!in[i].storage_initialized()) {
            skip[i] = true;
            continue;
          }
          auto size = in[i].aux_shape(kIdx).Size();
          num_rows[i] = size;
          total_num_rows += size;
          in_vals[i] = in[i].data().FlatTo2D<cpu, DType>();
          in_indices[i] = in[i].aux_data(kIdx).FlatTo1D<cpu, IType>();
        }
        std::vector<IType> indices;
        indices.reserve(total_num_rows);
        // gather indices from all inputs
        for (size_t i = 0; i < num_in; i++) {
          for (size_t j = 0; j < num_rows[i]; j++) {
            indices.emplace_back(in_indices[i][j]);
          }
        }
        CHECK_EQ(indices.size(), total_num_rows);
        // dedup indices
        std::sort(indices.begin(), indices.end());
        indices.resize(std::unique(indices.begin(), indices.end()) - indices.begin());
        // the one left are unique non-zero rows
        size_t nnr = indices.size();
        // allocate memory for output
        out->CheckAndAlloc({Shape1(nnr)});
        auto idx_data = out->aux_data(kIdx).FlatTo1D<cpu, IType>();
        auto val_data = out->data().FlatTo2D<cpu, DType>();
        // merge: for each unique row, sum contributions from every input that
        // has that row (each input's cursor advances monotonically since its
        // indices are sorted)
        for (size_t i = 0; i < nnr; i++) {
          // copy indices back
          idx_data[i] = indices[i];
          bool zeros = true;
          for (size_t j = 0; j < num_in; j++) {
            if (skip[j]) continue;
            size_t offset = offsets[j];
            if (offset < num_rows[j]) {
              if (indices[i] == in_indices[j][offset]) {
                if (zeros) {
                  Copy(val_data[i], in_vals[j][offset], nullptr);
                  zeros = false;
                } else {
                  val_data[i] += in_vals[j][offset];
                }
                offsets[j] += 1;
              }
            }
          }
        }
      });
    });
  }

  // Sum dptr[1..n-1] into dptr[0] over [offset, offset+size); unrolled in
  // groups of four to reduce loop overhead.
  template<typename DType>
  inline static void ReduceSumCPU(
      const std::vector<DType*> &dptr, size_t offset, index_t size) {
    using namespace mshadow;  // NOLINT(*)
    Tensor<cpu, 1, DType> in_0(dptr[0] + offset, Shape1(size));
    for (size_t i = 1; i < dptr.size(); i+=4) {
      switch (dptr.size() - i) {
        case 1: {
          Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
          in_0 += in_1;
          break;
        }
        case 2: {
          Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
          in_0 += in_1 + in_2;
          break;
        }
        case 3: {
          Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size));
          in_0 += in_1 + in_2 + in_3;
          break;
        }
        default: {
          Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_4(dptr[i+3] + offset, Shape1(size));
          in_0 += in_1 + in_2 + in_3 + in_4;
          break;
        }
      }
    }
  }

  // Dispatch the reduction: run serially for small arrays, otherwise split
  // into fixed-size chunks processed by an OpenMP thread pool.
  template<typename DType>
  inline void ReduceSumCPUImpl(std::vector<DType*> dptr, size_t total) {
    const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 10));
    long ntask = (total + step - 1) / step;  // NOLINT(*)
    if (total < bigarray_bound_ || nthread_reduction_ <= 1) {
      ReduceSumCPU(dptr, 0, total);
    } else {
      #pragma omp parallel for schedule(static) num_threads(nthread_reduction_)
      for (long j = 0; j < ntask; ++j) {  // NOLINT(*)
        size_t k = static_cast<size_t>(j);
        size_t begin = std::min(k * step, total);
        size_t end = std::min((k + 1) * step, total);
        if (j == ntask - 1) CHECK_EQ(end, total);
        ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin));
      }
    }
  }

  /// \brief temporal space for pushing and pulling
  struct BufferEntry {
    /// \brief the merged value
    NDArray merged;
    /// \brief the cpu buffer for gpu data
    std::vector<NDArray> copy_buf;
  };
  std::unordered_map<int, BufferEntry> merge_buf_;
  // arrays at least this large use the multi-threaded reduction path
  size_t bigarray_bound_;
  // thread count for the OpenMP reduction (MXNET_KVSTORE_REDUCTION_NTHREADS)
  int nthread_reduction_;
  // benchmark-only switch: force the serial sparse reduction
  bool is_serial_push_;
};

/**
 * \brief an implementation of Comm that performs reduction on device
 * directly.
 *
 * It is faster if the total device-to-device bandwidths is larger than
 * device-to-cpu, which is often true for 4 or 8 GPUs. But it uses more device
 * memory.
 */
class CommDevice : public Comm {
 public:
  CommDevice() {
    inited_ = false;
  }

  virtual ~CommDevice() { }

  // Record key attributes; actual buffer allocation is deferred until the
  // first Reduce so the buffers can be spread across the devices in use.
  void Init(int key, const NDArrayStorageType stype, const TShape& shape,
            int dtype = mshadow::kFloat32) override {
    if (stype == kDefaultStorage) {
      sorted_key_attrs_.push_back(std::make_tuple(key, shape, dtype));
    } else {
      LOG(FATAL) << "storage type " << stype << " not implemented for device yet";
    }
  }

  // Lazily allocate merge buffers and (optionally) enable GPU peer-to-peer
  // access based on the contexts of the first batch of sources.
  void InitBuffersAndComm(const std::vector<NDArray>& src) {
    if (!inited_) {
      std::vector<Context> devs;
      for (const auto& a : src) {
        devs.push_back(a.ctx());
      }
      InitMergeBuffer(devs);
      if (dmlc::GetEnv("MXNET_ENABLE_GPU_P2P", 1)) {
        EnableP2P(devs);
      }
    }
  }

  const NDArray& Reduce(int key, const std::vector<NDArray>& src,
                        int priority) override {
    // when this reduce is called from kvstore_dist, gc is not set
    // we don't do compression twice in dist_sync_device
    if ((gc_ != nullptr) && (gc_->get_type() != CompressionType::kNone)) {
      return ReduceCompressed(key, src, priority);
    }

    // avoid extra copy for single device, but it may bring problems for
    // abnormal usage of kvstore
    if (src.size() == 1) {
      return src[0];
    }

    InitBuffersAndComm(src);
    auto& buf = merge_buf_[key];
    std::vector<NDArray> reduce(src.size());
    CopyFromTo(src[0], &(buf.merged), priority);
    reduce[0] = buf.merged;

    if (buf.copy_buf.empty()) {
      // TODO(mli) this results in large device memory usage for huge ndarray,
      // such as the largest fullc in VGG. consider to do segment reduce with
      // NDArray.Slice or gpu direct memory access. for the latter, we need to
      // remove some ctx check, and also it reduces 20% perf
      buf.copy_buf.resize(src.size()-1);
      for (size_t i = 0; i < src.size()-1; ++i) {
        buf.copy_buf[i] = NDArray(
          buf.merged.shape(), buf.merged.ctx(), false, buf.merged.dtype());
      }
    }
    for (size_t i = 0; i < src.size()-1; ++i) {
      CopyFromTo(src[i+1], &(buf.copy_buf[i]), priority);
      reduce[i+1] = buf.copy_buf[i];
    }

    ElementwiseSum(reduce, &buf.merged);
    return buf.merged;
  }

  // Reduce with gradient compression: quantize each source on its own
  // device, ship the small compressed buffer, dequantize on the merge
  // device, then sum.
  const NDArray& ReduceCompressed(int key, const std::vector<NDArray>& src,
                                  int priority) {
    InitBuffersAndComm(src);
    auto& buf = merge_buf_[key];
    std::vector<NDArray> reduce(src.size());
    if (buf.copy_buf.empty()) {
      // one buf for each context
      buf.copy_buf.resize(src.size());
      buf.compressed_recv_buf.resize(src.size());
      buf.compressed_send_buf.resize(src.size());
      buf.residual.resize(src.size());

      for (size_t i = 0; i < src.size(); ++i) {
        buf.copy_buf[i] = NDArray(buf.merged.shape(), buf.merged.ctx(),
                                  false, buf.merged.dtype());
        buf.residual[i] = NDArray(buf.merged.shape(), src[i].ctx(),
                                  false, buf.merged.dtype());
        buf.residual[i] = 0;
        int64_t small_size = gc_->GetCompressedSize(buf.merged.shape().Size());
        buf.compressed_recv_buf[i] = NDArray(TShape{small_size}, buf.merged.ctx(),
                                        false, buf.merged.dtype());
        buf.compressed_send_buf[i] = NDArray(TShape{small_size}, src[i].ctx(),
                                        false, buf.merged.dtype());
      }
    }

    for (size_t i = 0; i < src.size(); ++i) {
      // compress before copy
      // this is done even if the data is on same context as copy_buf because
      // we don't want the training to be biased towards data on this GPU
      gc_->Quantize(src[i], &(buf.compressed_send_buf[i]), &(buf.residual[i]), priority);

      if (buf.compressed_send_buf[i].ctx() != buf.compressed_recv_buf[i].ctx()) {
        CopyFromTo(buf.compressed_send_buf[i], &(buf.compressed_recv_buf[i]), priority);
      } else {
        // avoid memory copy when they are on same context
        buf.compressed_recv_buf[i] = buf.compressed_send_buf[i];
      }

      gc_->Dequantize(buf.compressed_recv_buf[i], &(buf.copy_buf[i]), priority);
      reduce[i] = buf.copy_buf[i];
    }
    ElementwiseSum(reduce, &buf.merged);
    return buf.merged;
  }

  void Broadcast(int key, const NDArray& src,
                 const std::vector<NDArray*> dst, int priority) override {
    if (!inited_) {
      // copy to a random device first
      int dev_id = key % dst.size();
      CopyFromTo(src, dst[dev_id], priority);
      for (size_t i = 0; i < dst.size(); ++i) {
        if (i != static_cast<size_t>(dev_id)) {
          CopyFromTo(*dst[dev_id], dst[i], priority);
        }
      }
    } else {
      auto& buf = merge_buf_[key];
      CopyFromTo(src, &buf.merged, priority);
      for (auto d : dst) {
        CopyFromTo(buf.merged, d, priority);
      }
    }
  }

  void BroadcastRowSparse(int key, const NDArray& src,
                          const std::vector<std::pair<NDArray*, NDArray>>& dst,
                          const bool use_copy,
                          const int priority) override {
    LOG(FATAL) << "Not implemented yet";
  }

 private:
  // Enable CUDA peer-to-peer access between every pair of visible GPUs;
  // logs an access matrix if any pair could not be enabled.
  void EnableP2P(const std::vector<Context>& devs) {
#if MXNET_USE_CUDA
    std::vector<int> gpus;
    for (const auto& d : devs) {
      if (d.dev_mask() == gpu::kDevMask) {
        gpus.push_back(d.dev_id);
      }
    }
    int n = static_cast<int>(gpus.size());
    int enabled = 0;
    std::vector<int> p2p(n*n);
    for (int i = 0; i < n; ++i) {
      cudaSetDevice(gpus[i]);
      for (int j = 0; j < n; j++) {
        int access;
        cudaDeviceCanAccessPeer(&access, gpus[i], gpus[j]);
        if (access) {
          cudaError_t e = cudaDeviceEnablePeerAccess(gpus[j], 0);
          if (e == cudaSuccess || e == cudaErrorPeerAccessAlreadyEnabled) {
            ++enabled;
            p2p[i*n+j] = 1;
          }
        }
      }
    }
    if (enabled != n*(n-1)) {
      // print warning info if not fully enabled
      LOG(WARNING) << "only " << enabled <<  " out of "
                   << n*(n-1) << " GPU pairs are enabled direct access. "
                   << "It may affect the performance. "
                   << "You can set MXNET_ENABLE_GPU_P2P=0 to turn it off";
      std::string access(n, '.');
      for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
          access[j] = p2p[i*n+j] ? 'v' : '.';
        }
        LOG(WARNING) << access;
      }
    }
#endif
  }

  using KeyAttrs = std::tuple<int, TShape, int>;
  // try to allocate buff on device evenly
  void InitMergeBuffer(const std::vector<Context>& devs) {
    // largest keys first, then greedily place each on the least-loaded device
    std::sort(sorted_key_attrs_.begin(), sorted_key_attrs_.end(), [](
              const KeyAttrs& a, const KeyAttrs& b) {
      return std::get<1>(a).Size() > std::get<1>(b).Size();
    });

    std::unordered_map<int, std::pair<Context, size_t>> ctx_info;
    for (auto d : devs) {
      ctx_info[d.dev_id] = std::make_pair(d, 0);
    }
    for (size_t i = 0; i < sorted_key_attrs_.size(); ++i) {
      int key = std::get<0>(sorted_key_attrs_[i]);
      TShape s = std::get<1>(sorted_key_attrs_[i]);
      int type = std::get<2>(sorted_key_attrs_[i]);
      auto& buf = merge_buf_[key];
      Context ctx;
      size_t min_size = std::numeric_limits<size_t>::max();
      for (auto it = ctx_info.begin(); it != ctx_info.end(); ++it) {
        size_t size = it->second.second;
        if (size <= min_size) {
          ctx = it->second.first;
          min_size = size;
        }
      }
      buf.merged = NDArray(s, ctx, false, type);
      ctx_info[ctx.dev_id].second += s.Size();
    }
    inited_ = true;
  }

  std::vector<KeyAttrs> sorted_key_attrs_;
  /// \brief temporal space for pushing and pulling
  struct BufferEntry {
    /// \brief the merged value
    NDArray merged;
    /// \brief the gpu buffer
    std::vector<NDArray> copy_buf;
    /// \brief the residual buffer for gradient compression
    std::vector<NDArray> residual;
    /// \brief the small buffer for compressed data in sender
    std::vector<NDArray> compressed_send_buf;
    /// \brief the small buffer for compressed data in receiver
    std::vector<NDArray> compressed_recv_buf;
  };
  std::unordered_map<int, BufferEntry> merge_buf_;
  // whether merge buffers and P2P access have been initialized
  bool inited_;
};

}  // namespace kvstore
}  // namespace mxnet
#endif  // MXNET_KVSTORE_COMM_H_
omp_smithW-v1-refinedOrig.c
/*********************************************************************************
 * Smith-Waterman algorithm
 * Purpose:     Local alignment of nucleotide or protein sequences
 * Authors:     Daniel Holanda, Hanoch Griner, Taynara Pinheiro
 *
 * Compilation: g++ omp_smithW.c -o omp_smithW -fopenmp -DDEBUG     // debugging mode
 *              g++ omp_smithW.c -O3 -DNDEBUG=1 -o omp_smithW -fopenmp // production run
 * Execution:   ./omp_smithW <number_of_col> <number_of_rows>
 *
 * Updated by C. Liao, Jan 2nd, 2019
 *********************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <time.h>
#include <assert.h>
#include <chrono>
#include <stdbool.h> // C99 does not support the boolean data type

/*--------------------------------------------------------------------
 * Text Tweaks
 */
#define RESET   "\033[0m"
#define BOLDRED "\033[1m\033[31m" /* Bold Red */
/* End of text tweaks */

/*--------------------------------------------------------------------
 * Constants
 */
#define PATH -1
#define NONE 0
#define UP 1
#define LEFT 2
#define DIAGONAL 3
/* End of constants */

/*--------------------------------------------------------------------
 * Helpers
 */
#define min(x, y) (((x) < (y)) ? (x) : (y))
/* BUG FIX: the original expansion was ((a) > (b) ? a : b) — the two result
 * operands were unparenthesized, so an argument containing a low-precedence
 * operator could re-associate with the surrounding ?: expression. Every
 * macro argument is now fully parenthesized. */
#define max(a, b) (((a) > (b)) ? (a) : (b))

#ifndef _OPENMP
#include <sys/time.h>
/* Wall-clock time in seconds; fallback timer when OpenMP is not available. */
double time_stamp()
{
    struct timeval t;
    double time;
    gettimeofday(&t, NULL);
    time = t.tv_sec + 1.0e-6 * t.tv_usec;
    return time;
}
/* Stub so calls to omp_get_wtime() still link in a serial build. */
double omp_get_wtime()
{
    return time_stamp();
}
#endif

// #define DEBUG
/* End of Helpers */

/*--------------------------------------------------------------------
 * Functions Prototypes
 */
void similarityScore(long long int i, long long int j, int* H, int* P, long long int* maxPos);
int matchMissmatchScore(long long int i, long long int j);
int backtrack(int* P, long long int maxPos);
void printMatrix(int* matrix);
void printPredecessorMatrix(int* matrix);
void generate(void);
long long int nElement(long long int i);
void calcFirstDiagElement(long long int i, long long int *si, long long int *sj);
/* End of prototypes */

/*--------------------------------------------------------------------
 * Global Variables
 */
bool useBuiltInData = true;

//Defines size of strings to be compared
long long int m = 8; //Columns - Size of string a
long long int n = 9; //Lines - Size of string b
// the generated scoring matrix's size is m++ and n++ later to have the first row/column as 0s.
//Defines scores int matchScore = 3; int missmatchScore = -3; int gapScore = -2; //Strings over the Alphabet Sigma char *a, *b; /* End of global variables */ /*-------------------------------------------------------------------- * Function: main */ int main(int argc, char* argv[]) { typedef std::chrono::time_point<std::chrono::system_clock> time_point; // thread_count is no longer used int thread_count; if (argc==3) { m = strtoll(argv[1], NULL, 10); n = strtoll(argv[2], NULL, 10); useBuiltInData = false; } //#ifdef DEBUG if (useBuiltInData) printf ("Using built-in data for testing ..\n"); printf("Problem size: Matrix[%lld][%lld]\n", n, m); //#endif //Allocates a and b a = (char*) malloc(m * sizeof(char)); b = (char*) malloc(n * sizeof(char)); //Because now we have zeros m++; n++; //Allocates similarity matrix H int *H; H = (int *) calloc(m * n, sizeof(int)); //Allocates predecessor matrix P int *P; P = (int *)calloc(m * n, sizeof(int)); unsigned long long sz = (m+n +2*m*n)*sizeof(int)/1024/1024; if (sz>=1024) printf("Total memory footprint is:%llu GB\n", sz/1024) ; else printf("Total memory footprint is:%llu MB\n", sz) ; if (useBuiltInData) { // https://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm#Example // Using the wiki example to verify the results b[0] = 'G'; b[1] = 'G'; b[2] = 'T'; b[3] = 'T'; b[4] = 'G'; b[5] = 'A'; b[6] = 'C'; b[7] = 'T'; b[8] = 'A'; a[0] = 'T'; a[1] = 'G'; a[2] = 'T'; a[3] = 'T'; a[4] = 'A'; a[5] = 'C'; a[6] = 'G'; a[7] = 'G'; } else { //Gen random arrays a and b generate(); } //Start position for backtrack long long int maxPos = 0; //Calculates the similarity matrix long long int i, j; // The way to generate all wavefront is to go through the top edge elements // starting from the left top of the matrix, go to the bottom top -> down, then left->right // total top edge element count = dim1_size + dim2_size -1 //Because now we have zeros ((m-1) + (n-1) - 1) long long int nDiag = m + n - 3; #ifdef DEBUG printf("nDiag=%d\n", nDiag); 
printf("Number of wavefront lines and their first element positions:\n"); #endif #ifdef _OPENMP #pragma omp parallel { #pragma omp master { thread_count = omp_get_num_threads(); printf ("Using %d out of max %d threads...", thread_count, omp_get_max_threads()); } } #endif //Gets Initial time // double initialTime = omp_get_wtime(); time_point starttime = std::chrono::system_clock::now(); #pragma omp parallel default(none) shared(H, P, maxPos, nDiag, j) private(i) { for (i = 1; i <= nDiag; ++i) // start from 1 since 0 is the boundary padding { long long int nEle, si, sj; nEle = nElement(i); calcFirstDiagElement(i, &si, &sj); #pragma omp for private(j) for (j = 0; j < nEle; ++j) { // going upwards : anti-diagnol direction long long int ai = si - j ; // going up vertically long long int aj = sj + j; // going right in horizontal similarityScore(ai, aj, H, P, &maxPos); // a critical section is used inside } } } int len = backtrack(P, maxPos); time_point endtime = std::chrono::system_clock::now(); int elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(endtime-starttime).count(); printf("\nElapsed time: %d ms Path length: %d \n\n", elapsed, len); if (useBuiltInData) { printf ("Verifying results using the builtinIn data: %s\n", (H[n*m-1]==7)?"true":"false"); assert (H[n*m-1]==7); } #ifdef DEBUG printf("\nSimilarity Matrix:\n"); printMatrix(H); printf("\nPredecessor Matrix:\n"); printPredecessorMatrix(P); #endif //Frees similarity matrixes // free(H); // free(P); //Frees input arrays // free(a); // free(b); return 0; } /* End of main */ /*-------------------------------------------------------------------- * Function: nElement * Purpose: Calculate the number of i-diagonal's elements * i value range 1 to nDiag. we inclulde the upper bound value. 0 is for the padded wavefront, which is ignored. 
*/ long long int nElement(long long int i) { if (i < m && i < n) { // smaller than both directions //Number of elements in the diagonal is increasing return i; } else if (i < max(m, n)) { // smaller than only one direction //Number of elements in the diagonal is stable long int min = min(m, n); // the longer direction has the edge elements, the number is the smaller direction's size return min - 1; } else { //Number of elements in the diagonal is decreasing long int min = min(m, n); return 2 * min - i + llabs(m - n) - 2; } } /*-------------------------------------------------------------------- * Function: calcElement: expect valid i value is from 1 to nDiag. since the first one is 0 padding * Purpose: Calculate the position of (si, sj)-element * n rows, m columns: we sweep the matrix on the left edge then bottom edge to get the wavefront */ void calcFirstDiagElement(long long int i, long long int *si, long long int *sj) { // Calculate the first element of diagonal if (i < n) { // smaller than row count *si = i; *sj = 1; // start from the j==1 since j==0 is the padding } else { // now we sweep horizontally at the bottom of the matrix *si = n - 1; // i is fixed *sj = i - n + 2; // j position is the nDiag (id -n) +1 +1 // first +1 } } /* // understanding the calculation by an example n =6 // row m =2 // col padded scoring matrix n=7 m=3 0 1 2 ------- 0 x x x 1 x x x 2 x x x 3 x x x 4 x x x 5 x x x 6 x x x We should peel off top row and left column since they are the padding the remaining 6x2 sub matrix is what is interesting for us Now find the number of wavefront lines and their first element's position in the scoring matrix total diagnol frontwave = (n-1) + (m-1) -1 // submatrix row+column -1 We use the left most element in each wavefront line as its first element. Then we have the first elements like (1,1), (2,1) (3,1) .. 
(6,1) (6,2) */ /*-------------------------------------------------------------------- * Function: SimilarityScore * Purpose: Calculate value of scoring matrix element H(i,j) : the maximum Similarity-Score H(i,j) * int *P; the predecessor array,storing which of the three elements is picked with max value */ void similarityScore(long long int i, long long int j, int* H, int* P, long long int* maxPos) { int up, left, diag; //Stores index of element long long int index = m * i + j; //Get element above up = H[index - m] + gapScore; //Get element on the left left = H[index - 1] + gapScore; //Get element on the diagonal diag = H[index - m - 1] + matchMissmatchScore(i, j); //Calculates the maximum int max = NONE; int pred = NONE; /* === Matrix === * a[0] ... a[n] * b[0] * ... * b[n] * * generate 'a' from 'b', if '←' insert e '↑' remove * a=GAATTCA * b=GACTT-A * * generate 'b' from 'a', if '←' insert e '↑' remove * b=GACTT-A * a=GAATTCA */ if (diag > max) { //same letter ↖ max = diag; pred = DIAGONAL; } if (up > max) { //remove letter ↑ max = up; pred = UP; } if (left > max) { //insert letter ← max = left; pred = LEFT; } //Inserts the value in the similarity and predecessor matrixes H[index] = max; P[index] = pred; //Updates maximum score to be used as seed on backtrack if (max > H[*maxPos]) { #pragma omp critical *maxPos = index; } } /* End of similarityScore */ /*-------------------------------------------------------------------- * Function: matchMissmatchScore * Purpose: Similarity function on the alphabet for match/missmatch */ int matchMissmatchScore(long long int i, long long int j) { if (a[j - 1] == b[i - 1]) return matchScore; else return missmatchScore; } /* End of matchMissmatchScore */ /*-------------------------------------------------------------------- * Function: backtrack * Purpose: Modify matrix to print, path change from value to PATH */ int backtrack(int* P, long long int maxPos) { //hold maxPos value long long int predPos; int len = 0; //backtrack from 
maxPos to startPos = 0 do { switch (P[maxPos]) { case DIAGONAL: predPos = maxPos - m - 1; break; case UP: predPos = maxPos - m; break; case LEFT: predPos = maxPos - 1; break; default:; } #ifdef DEBUG P[maxPos] *= PATH; #endif maxPos = predPos; ++len; } while (P[maxPos] != NONE); return len; } /* End of backtrack */ /*-------------------------------------------------------------------- * Function: printMatrix * Purpose: Print Matrix */ void printMatrix(int* matrix) { long long int i, j; printf("-\t-\t"); for (j = 0; j < m-1; j++) { printf("%c\t", a[j]); } printf("\n-\t"); for (i = 0; i < n; i++) { //Lines for (j = 0; j < m; j++) { if (j==0 && i>0) printf("%c\t", b[i-1]); printf("%d\t", matrix[m * i + j]); } printf("\n"); } } /* End of printMatrix */ /*-------------------------------------------------------------------- * Function: printPredecessorMatrix * Purpose: Print predecessor matrix */ void printPredecessorMatrix(int* matrix) { long long int i, j, index; printf(" "); for (j = 0; j < m-1; j++) { printf("%c ", a[j]); } printf("\n "); for (i = 0; i < n; i++) { //Lines for (j = 0; j < m; j++) { if (j==0 && i>0) printf("%c ", b[i-1]); index = m * i + j; if (matrix[index] < 0) { printf(BOLDRED); if (matrix[index] == -UP) printf("↑ "); else if (matrix[index] == -LEFT) printf("← "); else if (matrix[index] == -DIAGONAL) printf("↖ "); else printf("- "); printf(RESET); } else { if (matrix[index] == UP) printf("↑ "); else if (matrix[index] == LEFT) printf("← "); else if (matrix[index] == DIAGONAL) printf("↖ "); else printf("- "); } } printf("\n"); } } /* End of printPredecessorMatrix */ /*-------------------------------------------------------------------- * Function: generate * Purpose: Generate arrays a and b */ void generate() { //Random seed srand(time(NULL)); //Generates the values of a long long int i; for (i = 0; i < m; i++) { int aux = rand() % 4; if (aux == 0) a[i] = 'A'; else if (aux == 2) a[i] = 'C'; else if (aux == 3) a[i] = 'G'; else a[i] = 'T'; } //Generates 
the values of b for (i = 0; i < n; i++) { int aux = rand() % 4; if (aux == 0) b[i] = 'A'; else if (aux == 2) b[i] = 'C'; else if (aux == 3) b[i] = 'G'; else b[i] = 'T'; } } /* End of generate */ /*-------------------------------------------------------------------- * External References: * http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1 * http://pt.slideshare.net/avrilcoghlan/the-smith-waterman-algorithm * http://baba.sourceforge.net/ */
displacement_lagrangemultiplier_residual_frictional_contact_criteria.h
// KRATOS ______ __ __ _____ __ __ __ // / ____/___ ____ / /_____ ______/ /_/ ___// /________ _______/ /___ ___________ _/ / // / / / __ \/ __ \/ __/ __ `/ ___/ __/\__ \/ __/ ___/ / / / ___/ __/ / / / ___/ __ `/ / // / /___/ /_/ / / / / /_/ /_/ / /__/ /_ ___/ / /_/ / / /_/ / /__/ /_/ /_/ / / / /_/ / / // \____/\____/_/ /_/\__/\__,_/\___/\__//____/\__/_/ \__,_/\___/\__/\__,_/_/ \__,_/_/ MECHANICS // // License: BSD License // license: ContactStructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_FRICTIONAL_CONTACT_CRITERIA_H) #define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_FRICTIONAL_CONTACT_CRITERIA_H /* System includes */ /* External includes */ /* Project includes */ #include "utilities/table_stream_utility.h" #include "custom_strategies/custom_convergencecriterias/base_mortar_criteria.h" #include "utilities/color_utilities.h" #include "custom_utilities/active_set_utilities.h" #include "utilities/constraint_utilities.h" #include "custom_utilities/contact_utilities.h" namespace Kratos { ///@addtogroup ContactStructuralMechanicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@name Kratos Classes ///@{ /** * @class DisplacementLagrangeMultiplierResidualFrictionalContactCriteria * @ingroup ContactStructuralMechanicsApplication * @brief Convergence criteria for contact problems (only for frictional cases) * This class implements a convergence control based on nodal displacement and * lagrange multiplier values. The error is evaluated separately for each of them, and * relative and absolute tolerances for both must be specified. 
* @author Vicente Mataix Ferrandiz */ template< class TSparseSpace, class TDenseSpace > class DisplacementLagrangeMultiplierResidualFrictionalContactCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace > { public: ///@name Type Definitions ///@{ /// Pointer definition of DisplacementLagrangeMultiplierResidualFrictionalContactCriteria KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierResidualFrictionalContactCriteria ); /// Local Flags KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT ); KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT ); KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED ); KRATOS_DEFINE_LOCAL_FLAG( ROTATION_DOF_IS_CONSIDERED ); KRATOS_DEFINE_LOCAL_FLAG( PURE_SLIP ); KRATOS_DEFINE_LOCAL_FLAG( INITIAL_RESIDUAL_IS_SET ); KRATOS_DEFINE_LOCAL_FLAG( INITIAL_NORMAL_RESIDUAL_IS_SET ); KRATOS_DEFINE_LOCAL_FLAG( INITIAL_STICK_RESIDUAL_IS_SET ); KRATOS_DEFINE_LOCAL_FLAG( INITIAL_SLIP_RESIDUAL_IS_SET ); /// The base class definition typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType; /// The definition of the current class typedef DisplacementLagrangeMultiplierResidualFrictionalContactCriteria< TSparseSpace, TDenseSpace > ClassType; /// The dofs array type typedef typename BaseType::DofsArrayType DofsArrayType; /// The sparse matrix type typedef typename BaseType::TSystemMatrixType TSystemMatrixType; /// The dense vector type typedef typename BaseType::TSystemVectorType TSystemVectorType; /// The sparse space used typedef TSparseSpace SparseSpaceType; /// The table stream definition TODO: Replace by logger typedef TableStreamUtility::Pointer TablePrinterPointerType; /// The index type definition typedef std::size_t IndexType; /// Zero tolerance definition static constexpr double ZeroTolerance = std::numeric_limits<double>::epsilon(); ///@} ///@name Life Cycle ///@{ /** * @brief Default constructor. */ explicit DisplacementLagrangeMultiplierResidualFrictionalContactCriteria() : BaseType() { } /** * @brief Default constructor. 
(with parameters) * @param ThisParameters The configuration parameters */ explicit DisplacementLagrangeMultiplierResidualFrictionalContactCriteria(Kratos::Parameters ThisParameters) : BaseType() { // Validate and assign defaults ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters()); this->AssignSettings(ThisParameters); } /** * @brief Default constructor * @param DispRatioTolerance Relative tolerance for displacement residual error * @param DispAbsTolerance Absolute tolerance for displacement residual error * @param RotRatioTolerance Relative tolerance for rotation residual error * @param RotAbsTolerance Absolute tolerance for rotation residual error * @param LMRatioTolerance Relative tolerance for lagrange multiplier residual error * @param LMAbsTolerance Absolute tolerance for lagrange multiplier residual error * @param EnsureContact To check if the contact is lost * @param NormalTangentRatio Ratio between the normal and tangent that will accepted as converged * @param pTable The pointer to the output r_table * @param PrintingOutput If the output is going to be printed in a txt file */ explicit DisplacementLagrangeMultiplierResidualFrictionalContactCriteria( const double DispRatioTolerance, const double DispAbsTolerance, const double RotRatioTolerance, const double RotAbsTolerance, const double LMNormalRatioTolerance, const double LMNormalAbsTolerance, const double LMTangentStickRatioTolerance, const double LMTangentStickAbsTolerance, const double LMTangentSlipRatioTolerance, const double LMTangentSlipAbsTolerance, const double NormalTangentRatio, const bool EnsureContact = false, const bool PureSlip = false, const bool PrintingOutput = false ) : BaseType() { // Set local flags mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::ENSURE_CONTACT, EnsureContact); mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PRINTING_OUTPUT, PrintingOutput); 
mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::TABLE_IS_INITIALIZED, false); mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED, false); mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PURE_SLIP, PureSlip); mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_RESIDUAL_IS_SET, false); mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_NORMAL_RESIDUAL_IS_SET, false); mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_STICK_RESIDUAL_IS_SET, false); mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_SLIP_RESIDUAL_IS_SET, false); // The displacement residual mDispRatioTolerance = DispRatioTolerance; mDispAbsTolerance = DispAbsTolerance; // The rotation residual mRotRatioTolerance = RotRatioTolerance; mRotAbsTolerance = RotAbsTolerance; // The normal contact residual mLMNormalRatioTolerance = LMNormalRatioTolerance; mLMNormalAbsTolerance = LMNormalAbsTolerance; // The tangent contact residual mLMTangentStickRatioTolerance = LMTangentStickRatioTolerance; mLMTangentStickAbsTolerance = LMTangentStickAbsTolerance; mLMTangentSlipRatioTolerance = LMTangentSlipRatioTolerance; mLMTangentSlipAbsTolerance = LMTangentSlipAbsTolerance; // We get the ratio between the normal and tangent that will accepted as converged mNormalTangentRatio = NormalTangentRatio; } // Copy constructor. 
DisplacementLagrangeMultiplierResidualFrictionalContactCriteria( DisplacementLagrangeMultiplierResidualFrictionalContactCriteria const& rOther ) :BaseType(rOther) ,mOptions(rOther.mOptions) ,mDispRatioTolerance(rOther.mDispRatioTolerance) ,mDispAbsTolerance(rOther.mDispAbsTolerance) ,mDispInitialResidualNorm(rOther.mDispInitialResidualNorm) ,mDispCurrentResidualNorm(rOther.mDispCurrentResidualNorm) ,mRotRatioTolerance(rOther.mRotRatioTolerance) ,mRotAbsTolerance(rOther.mRotAbsTolerance) ,mRotInitialResidualNorm(rOther.mRotInitialResidualNorm) ,mRotCurrentResidualNorm(rOther.mRotCurrentResidualNorm) ,mLMNormalRatioTolerance(rOther.mLMNormalRatioTolerance) ,mLMNormalAbsTolerance(rOther.mLMNormalAbsTolerance) ,mLMNormalInitialResidualNorm(rOther.mLMNormalInitialResidualNorm) ,mLMNormalCurrentResidualNorm(rOther.mLMNormalCurrentResidualNorm) ,mLMTangentStickRatioTolerance(rOther.mLMTangentStickRatioTolerance) ,mLMTangentStickAbsTolerance(rOther.mLMTangentStickAbsTolerance) ,mLMTangentSlipRatioTolerance(rOther.mLMTangentSlipRatioTolerance) ,mLMTangentSlipAbsTolerance(rOther.mLMTangentSlipAbsTolerance) ,mLMTangentStickInitialResidualNorm(rOther.mLMTangentStickInitialResidualNorm) ,mLMTangentStickCurrentResidualNorm(rOther.mLMTangentStickCurrentResidualNorm) ,mStickCounter(rOther.mStickCounter) ,mSlipCounter(rOther.mSlipCounter) ,mNormalTangentRatio(rOther.mNormalTangentRatio) { } /// Destructor. ~DisplacementLagrangeMultiplierResidualFrictionalContactCriteria() override = default; ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief Create method * @param ThisParameters The configuration parameters */ typename BaseType::Pointer Create(Parameters ThisParameters) const override { return Kratos::make_shared<ClassType>(ThisParameters); } /** * @brief Compute relative and absolute error. * @param rModelPart Reference to the ModelPart containing the contact problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) * @return true if convergence is achieved, false otherwise */ bool PostCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { if (SparseSpaceType::Size(rb) != 0) { //if we are solving for something // Getting process info ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); // Initialize double disp_residual_solution_norm = 0.0, rot_residual_solution_norm = 0.0,normal_lm_residual_solution_norm = 0.0, tangent_lm_stick_residual_solution_norm = 0.0, tangent_lm_slip_residual_solution_norm = 0.0; IndexType disp_dof_num(0), rot_dof_num(0), lm_dof_num(0), lm_stick_dof_num(0), lm_slip_dof_num(0); // The nodes array auto& r_nodes_array = rModelPart.Nodes(); // First iterator const auto it_dof_begin = rDofSet.begin(); // Auxiliar values std::size_t dof_id = 0; double residual_dof_value = 0.0; // The number of active dofs const std::size_t number_active_dofs = rb.size(); // Auxiliar displacement DoF check const std::function<bool(const VariableData&)> check_without_rot = [](const VariableData& rCurrVar) -> bool {return true;}; const std::function<bool(const VariableData&)> check_with_rot = [](const VariableData& rCurrVar) -> bool {return ((rCurrVar == DISPLACEMENT_X) || (rCurrVar == DISPLACEMENT_Y) || (rCurrVar == DISPLACEMENT_Z));}; const auto* p_check_disp = (mOptions.Is(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? 
&check_with_rot : &check_without_rot; // Loop over Dofs #pragma omp parallel for firstprivate(dof_id,residual_dof_value) reduction(+:disp_residual_solution_norm, rot_residual_solution_norm, normal_lm_residual_solution_norm, tangent_lm_stick_residual_solution_norm, tangent_lm_slip_residual_solution_norm, disp_dof_num, rot_dof_num, lm_dof_num, lm_stick_dof_num, lm_slip_dof_num) for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) { auto it_dof = it_dof_begin + i; dof_id = it_dof->EquationId(); // Check dof id is solved if (dof_id < number_active_dofs) { if (mActiveDofs[dof_id] == 1) { // The component of the residual residual_dof_value = rb[dof_id]; const auto& r_curr_var = it_dof->GetVariable(); if (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_X || r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y || r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) { // The normal of the node (TODO: how to solve this without accesing all the time to the database?) const auto it_node = r_nodes_array.find(it_dof->Id()); const double mu = it_node->GetValue(FRICTION_COEFFICIENT); if (mu < ZeroTolerance) { normal_lm_residual_solution_norm += std::pow(residual_dof_value, 2); } else { const double normal = it_node->FastGetSolutionStepValue(NORMAL)[r_curr_var.GetComponentIndex()]; const double normal_comp_residual = residual_dof_value * normal; normal_lm_residual_solution_norm += std::pow(normal_comp_residual, 2); if (it_node->Is(SLIP) || mOptions.Is(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PURE_SLIP)) { tangent_lm_slip_residual_solution_norm += std::pow(residual_dof_value - normal_comp_residual, 2); ++lm_slip_dof_num; } else { tangent_lm_stick_residual_solution_norm += std::pow(residual_dof_value - normal_comp_residual, 2); ++lm_stick_dof_num; } } ++lm_dof_num; } else if ((*p_check_disp)(r_curr_var)) { disp_residual_solution_norm += std::pow(residual_dof_value, 2); ++disp_dof_num; } else { // We will assume is rotation dof KRATOS_DEBUG_ERROR_IF_NOT((r_curr_var == ROTATION_X) 
|| (r_curr_var == ROTATION_Y) || (r_curr_var == ROTATION_Z)) << "Variable must be a ROTATION and it is: " << r_curr_var.Name() << std::endl; rot_residual_solution_norm += std::pow(residual_dof_value, 2); ++rot_dof_num; } } } } // Auxiliar dofs counters if (mStickCounter > 0) { if (lm_stick_dof_num == 0) { mStickCounter = 0; mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_STICK_RESIDUAL_IS_SET, false); } } else { if (lm_stick_dof_num > 0) { mStickCounter = lm_stick_dof_num; mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_STICK_RESIDUAL_IS_SET, false); } } if (mSlipCounter > 0) { if (lm_slip_dof_num == 0) { mSlipCounter = 0; mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_SLIP_RESIDUAL_IS_SET, false); } } else { if (lm_slip_dof_num > 0) { mSlipCounter = lm_slip_dof_num; mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_SLIP_RESIDUAL_IS_SET, false); } } mDispCurrentResidualNorm = disp_residual_solution_norm; mRotCurrentResidualNorm = rot_residual_solution_norm; mLMNormalCurrentResidualNorm = normal_lm_residual_solution_norm; mLMTangentStickCurrentResidualNorm = tangent_lm_stick_residual_solution_norm; mLMTangentSlipCurrentResidualNorm = tangent_lm_slip_residual_solution_norm; double residual_disp_ratio = 1.0; double residual_rot_ratio = 1.0; double residual_normal_lm_ratio = 1.0; double residual_tangent_lm_stick_ratio = 1.0; double residual_tangent_lm_slip_ratio = 1.0; // We initialize the solution if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_RESIDUAL_IS_SET)) { mDispInitialResidualNorm = (disp_residual_solution_norm < ZeroTolerance) ? 
1.0 : disp_residual_solution_norm; residual_disp_ratio = 1.0; if (mOptions.Is(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { mRotInitialResidualNorm = (rot_residual_solution_norm < ZeroTolerance) ? 1.0 : rot_residual_solution_norm; residual_rot_ratio = 1.0; } mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_RESIDUAL_IS_SET, true); } // We calculate the ratio of the displacements residual_disp_ratio = mDispCurrentResidualNorm/mDispInitialResidualNorm; residual_rot_ratio = mRotCurrentResidualNorm/mRotInitialResidualNorm; // We initialize the solution if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_NORMAL_RESIDUAL_IS_SET)) { mLMNormalInitialResidualNorm = (normal_lm_residual_solution_norm < ZeroTolerance) ? 1.0 : normal_lm_residual_solution_norm; residual_normal_lm_ratio = 1.0; mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_NORMAL_RESIDUAL_IS_SET, true); } // We calculate the ratio of the normal LM residual_normal_lm_ratio = mLMNormalCurrentResidualNorm/mLMNormalInitialResidualNorm; // We initialize the solution if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_STICK_RESIDUAL_IS_SET) && lm_stick_dof_num > 0) { mLMTangentStickInitialResidualNorm = (tangent_lm_stick_residual_solution_norm < ZeroTolerance) ? 1.0 : tangent_lm_stick_residual_solution_norm; residual_tangent_lm_stick_ratio = 1.0; mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_STICK_RESIDUAL_IS_SET, true); } if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_SLIP_RESIDUAL_IS_SET) && lm_slip_dof_num > 0) { mLMTangentSlipInitialResidualNorm = (tangent_lm_slip_residual_solution_norm < ZeroTolerance) ? 
1.0 : tangent_lm_slip_residual_solution_norm; residual_tangent_lm_slip_ratio = 1.0; mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_SLIP_RESIDUAL_IS_SET, true); } // We calculate the ratio of the tangent LM if (lm_stick_dof_num > 0) { residual_tangent_lm_stick_ratio = mLMTangentStickCurrentResidualNorm/mLMTangentStickInitialResidualNorm; } else { residual_tangent_lm_stick_ratio = 0.0; } if (lm_slip_dof_num > 0) { residual_tangent_lm_slip_ratio = mLMTangentSlipCurrentResidualNorm/mLMTangentSlipInitialResidualNorm; } else { residual_tangent_lm_slip_ratio = 0.0; } KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::ENSURE_CONTACT) && residual_normal_lm_ratio < ZeroTolerance) << "ERROR::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl; // We calculate the absolute norms const double residual_disp_abs = mDispCurrentResidualNorm/static_cast<double>(disp_dof_num); const double residual_rot_abs = mRotCurrentResidualNorm/static_cast<double>(rot_dof_num); const double residual_normal_lm_abs = mLMNormalCurrentResidualNorm/static_cast<double>(lm_dof_num); const double residual_tangent_lm_stick_abs = lm_stick_dof_num > 0 ? mLMTangentStickCurrentResidualNorm/static_cast<double>(lm_dof_num) : 0.0; // const double residual_tangent_lm_stick_abs = lm_stick_dof_num > 0 ? mLMTangentStickCurrentResidualNorm/static_cast<double>(lm_stick_dof_num) : 0.0; const double residual_tangent_lm_slip_abs = lm_slip_dof_num > 0 ? mLMTangentSlipCurrentResidualNorm/static_cast<double>(lm_dof_num) : 0.0; // const double residual_tangent_lm_slip_abs = lm_slip_dof_num > 0 ? 
mLMTangentSlipCurrentResidualNorm/static_cast<double>(lm_slip_dof_num) : 0.0; const double normal_tangent_stick_ratio = residual_tangent_lm_stick_abs/residual_normal_lm_abs; const double normal_tangent_slip_ratio = residual_tangent_lm_slip_abs/residual_normal_lm_abs; // We print the results // TODO: Replace for the new log if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { std::cout.precision(4); TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.Is(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PURE_SLIP)) { r_table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_rot_ratio << mRotRatioTolerance << residual_rot_abs << mRotAbsTolerance << residual_normal_lm_ratio << mLMNormalRatioTolerance << residual_normal_lm_abs << mLMNormalAbsTolerance << residual_tangent_lm_stick_ratio << mLMTangentStickRatioTolerance << residual_tangent_lm_stick_abs << mLMTangentStickAbsTolerance << residual_tangent_lm_slip_ratio << mLMTangentSlipRatioTolerance << residual_tangent_lm_slip_abs << mLMTangentSlipAbsTolerance; } else { r_table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_rot_ratio << mRotRatioTolerance << residual_rot_abs << mRotAbsTolerance << residual_normal_lm_ratio << mLMNormalRatioTolerance << residual_normal_lm_abs << mLMNormalAbsTolerance << residual_tangent_lm_slip_ratio << mLMTangentSlipRatioTolerance << residual_tangent_lm_slip_abs << mLMTangentSlipAbsTolerance; } } else { if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PURE_SLIP)) { r_table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_normal_lm_ratio << 
mLMNormalRatioTolerance << residual_normal_lm_abs << mLMNormalAbsTolerance << residual_tangent_lm_stick_ratio << mLMTangentStickRatioTolerance << residual_tangent_lm_stick_abs << mLMTangentStickAbsTolerance << residual_tangent_lm_slip_ratio << mLMTangentSlipRatioTolerance << residual_tangent_lm_slip_abs << mLMTangentSlipAbsTolerance; } else { r_table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_normal_lm_ratio << mLMNormalRatioTolerance << residual_normal_lm_abs << mLMNormalAbsTolerance << residual_tangent_lm_slip_ratio << mLMTangentSlipRatioTolerance << residual_tangent_lm_slip_abs << mLMTangentSlipAbsTolerance; } } } else { std::cout.precision(4); if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PRINTING_OUTPUT)) { KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << BOLDFONT("RESIDUAL CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific; KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << residual_disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << residual_disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl; if (mOptions.Is(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << BOLDFONT("\tROTATION: RATIO = ") << residual_rot_ratio << BOLDFONT(" EXP.RATIO = ") << mRotRatioTolerance << BOLDFONT(" ABS = ") << residual_rot_abs << BOLDFONT(" EXP.ABS = ") << mRotAbsTolerance << std::endl; } KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << BOLDFONT("\tNORMAL LAGRANGE MUL: RATIO = ") << residual_normal_lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMNormalRatioTolerance << BOLDFONT(" 
ABS = ") << residual_normal_lm_abs << BOLDFONT(" EXP.ABS = ") << mLMNormalAbsTolerance << std::endl; KRATOS_INFO_IF("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria", mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PURE_SLIP)) << BOLDFONT("\tSTICK LAGRANGE MUL: RATIO = ") << residual_tangent_lm_stick_ratio << BOLDFONT(" EXP.RATIO = ") << mLMTangentStickRatioTolerance << BOLDFONT(" ABS = ") << residual_tangent_lm_stick_abs << BOLDFONT(" EXP.ABS = ") << mLMTangentStickAbsTolerance << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << BOLDFONT("\tSLIP LAGRANGE MUL: RATIO = ") << residual_tangent_lm_slip_ratio << BOLDFONT(" EXP.RATIO = ") << mLMTangentSlipRatioTolerance << BOLDFONT(" ABS = ") << residual_tangent_lm_slip_abs << BOLDFONT(" EXP.ABS = ") << mLMTangentSlipAbsTolerance << std::endl; } else { KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << "RESIDUAL CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific; KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << "\tDISPLACEMENT: RATIO = " << residual_disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << residual_disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl; if (mOptions.Is(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << "\tROTATION: RATIO = " << residual_rot_ratio << " EXP.RATIO = " << mRotRatioTolerance << " ABS = " << residual_rot_abs << " EXP.ABS = " << mRotAbsTolerance << std::endl; } KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << "\tNORMAL LAGRANGE MUL: RATIO = " << residual_normal_lm_ratio << " EXP.RATIO = " << mLMNormalRatioTolerance << " ABS = " << residual_normal_lm_abs 
<< " EXP.ABS = " << mLMNormalAbsTolerance << std::endl; KRATOS_INFO_IF("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria", mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PURE_SLIP)) << "\tSTICK LAGRANGE MUL: RATIO = " << residual_tangent_lm_stick_ratio << " EXP.RATIO = " << mLMTangentStickRatioTolerance << " ABS = " << residual_tangent_lm_stick_abs << " EXP.ABS = " << mLMTangentStickAbsTolerance << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << "\tSLIP LAGRANGE MUL: RATIO = " << residual_tangent_lm_slip_ratio << " EXP.RATIO = " << mLMTangentSlipRatioTolerance << " ABS = " << residual_tangent_lm_slip_abs << " EXP.ABS = " << mLMTangentSlipAbsTolerance << std::endl; } } } // NOTE: Here we don't include the tangent counter part r_process_info[CONVERGENCE_RATIO] = (residual_disp_ratio > residual_normal_lm_ratio) ? residual_disp_ratio : residual_normal_lm_ratio; r_process_info[RESIDUAL_NORM] = (residual_normal_lm_abs > mLMNormalAbsTolerance) ? residual_normal_lm_abs : mLMNormalAbsTolerance; // We check if converged const bool disp_converged = (residual_disp_ratio <= mDispRatioTolerance || residual_disp_abs <= mDispAbsTolerance); const bool rot_converged = (mOptions.Is(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? (residual_rot_ratio <= mRotRatioTolerance || residual_rot_abs <= mRotAbsTolerance) : true; const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::ENSURE_CONTACT) && residual_normal_lm_ratio == 0.0) ? 
true : (residual_normal_lm_ratio <= mLMNormalRatioTolerance || residual_normal_lm_abs <= mLMNormalAbsTolerance) && (residual_tangent_lm_stick_ratio <= mLMTangentStickRatioTolerance || residual_tangent_lm_stick_abs <= mLMTangentStickAbsTolerance || normal_tangent_stick_ratio <= mNormalTangentRatio) && (residual_tangent_lm_slip_ratio <= mLMTangentSlipRatioTolerance || residual_tangent_lm_slip_abs <= mLMTangentSlipAbsTolerance || normal_tangent_slip_ratio <= mNormalTangentRatio); if (disp_converged && rot_converged && lm_converged ) { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PRINTING_OUTPUT)) r_table << BOLDFONT(FGRN(" Achieved")); else r_table << "Achieved"; } else { if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl; else KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << "\tResidual convergence is achieved" << std::endl; } } return true; } else { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PRINTING_OUTPUT)) r_table << BOLDFONT(FRED(" Not achieved")); else r_table << "Not achieved"; } else { if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << BOLDFONT("\tResidual") << " 
convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl; else KRATOS_INFO("DisplacementLagrangeMultiplierResidualFrictionalContactCriteria") << "\tResidual convergence is not achieved" << std::endl; } } return false; } } else { // In this case all the displacements are imposed! return true; } } /** * @brief This function initialize the convergence criteria * @param rModelPart Reference to the ModelPart containing the contact problem. (unused) */ void Initialize( ModelPart& rModelPart) override { // Initialize BaseType::mConvergenceCriteriaIsInitialized = true; // Check rotation dof mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED, ContactUtilities::CheckModelPartHasRotationDoF(rModelPart)); // Initialize header ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::TABLE_IS_INITIALIZED)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); r_table.AddColumn("DP RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); if (mOptions.Is(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { r_table.AddColumn("RT RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); } r_table.AddColumn("N.LM RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PURE_SLIP)) { r_table.AddColumn("STI. RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); } r_table.AddColumn("SLIP RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. 
ABS", 10); r_table.AddColumn("CONVERGENCE", 15); mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::TABLE_IS_INITIALIZED, true); } } /** * @brief This function initializes the solution step * @param rModelPart Reference to the ModelPart containing the contact problem. * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) */ void InitializeSolutionStep( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // Initialize flags mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_RESIDUAL_IS_SET, false); mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_NORMAL_RESIDUAL_IS_SET, false); mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_STICK_RESIDUAL_IS_SET, false); mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_SLIP_RESIDUAL_IS_SET, false); // Filling mActiveDofs when MPC exist ConstraintUtilities::ComputeActiveDofs(rModelPart, mActiveDofs, rDofSet); } /** * @brief This function finalizes the non-linear iteration * @param rModelPart Reference to the ModelPart containing the problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual + reactions) */ void FinalizeNonLinearIteration( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // Calling base criteria BaseType::FinalizeNonLinearIteration(rModelPart, rDofSet, rA, rDx, rb); // The current process info ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); r_process_info.SetValue(ACTIVE_SET_COMPUTED, false); } /** * @brief This method provides the defaults parameters to avoid conflicts between the different constructors * @return The default parameters */ Parameters GetDefaultParameters() const override { Parameters default_parameters = Parameters(R"( { "name" : "displacement_lagrangemultiplier_ressidual_frictional_contact_criteria", "ensure_contact" : false, "pure_slip" : false, "print_convergence_criterion" : false, "residual_relative_tolerance" : 1.0e-4, "residual_absolute_tolerance" : 1.0e-9, "rotation_residual_relative_tolerance" : 1.0e-4, "rotation_residual_absolute_tolerance" : 1.0e-9, "contact_residual_relative_tolerance" : 1.0e-4, "contact_residual_absolute_tolerance" : 1.0e-9, "frictional_stick_contact_residual_relative_tolerance" : 1.0e-4, "frictional_stick_contact_residual_absolute_tolerance" : 1.0e-9, "frictional_slip_contact_residual_relative_tolerance" : 1.0e-4, "frictional_slip_contact_residual_absolute_tolerance" : 1.0e-9 })"); // Getting base class default parameters const Parameters base_default_parameters = BaseType::GetDefaultParameters(); default_parameters.RecursivelyAddMissingParameters(base_default_parameters); return default_parameters; } /** * @brief Returns the name of the class as used in the settings (snake_case format) * @return The name of the class */ static std::string 
Name() { return "displacement_lagrangemultiplier_ressidual_frictional_contact_criteria"; } ///@} ///@name Acces ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "DisplacementLagrangeMultiplierResidualFrictionalContactCriteria"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. void PrintData(std::ostream& rOStream) const override { rOStream << Info(); } ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief This method assigns settings to member variables * @param ThisParameters Parameters that are assigned to the member variables */ void AssignSettings(const Parameters ThisParameters) override { BaseType::AssignSettings(ThisParameters); // The displacement residual mDispRatioTolerance = ThisParameters["residual_relative_tolerance"].GetDouble(); mDispAbsTolerance = ThisParameters["residual_absolute_tolerance"].GetDouble(); // The rotation residual mRotRatioTolerance = ThisParameters["rotation_residual_relative_tolerance"].GetDouble(); mRotAbsTolerance = ThisParameters["rotation_residual_absolute_tolerance"].GetDouble(); // The normal contact residual mLMNormalRatioTolerance = ThisParameters["contact_displacement_absolute_tolerance"].GetDouble(); mLMNormalAbsTolerance = ThisParameters["contact_residual_absolute_tolerance"].GetDouble(); // The tangent contact residual mLMTangentStickRatioTolerance = ThisParameters["frictional_stick_contact_residual_relative_tolerance"].GetDouble(); mLMTangentStickAbsTolerance = ThisParameters["frictional_stick_contact_residual_absolute_tolerance"].GetDouble(); mLMTangentSlipRatioTolerance = 
ThisParameters["frictional_slip_contact_residual_relative_tolerance"].GetDouble(); mLMTangentSlipAbsTolerance = ThisParameters["frictional_slip_contact_residual_absolute_tolerance"].GetDouble(); // We get the ratio between the normal and tangent that will accepted as converged mNormalTangentRatio = ThisParameters["ratio_normal_tangent_threshold"].GetDouble(); // Set local flags mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::TABLE_IS_INITIALIZED, false); mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::ROTATION_DOF_IS_CONSIDERED, false); mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::PURE_SLIP, ThisParameters["pure_slip"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_RESIDUAL_IS_SET, false); mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_NORMAL_RESIDUAL_IS_SET, false); mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_STICK_RESIDUAL_IS_SET, false); mOptions.Set(DisplacementLagrangeMultiplierResidualFrictionalContactCriteria::INITIAL_SLIP_RESIDUAL_IS_SET, false); } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ Flags mOptions; /// Local flags double mDispRatioTolerance; /// The ratio threshold for the norm of the displacement residual double mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement residual double mDispInitialResidualNorm; /// The reference norm of the 
displacement residual double mDispCurrentResidualNorm; /// The current norm of the displacement residual double mRotRatioTolerance; /// The ratio threshold for the norm of the rotation residual double mRotAbsTolerance; /// The absolute value threshold for the norm of the rotation residual double mRotInitialResidualNorm; /// The reference norm of the rotation residual double mRotCurrentResidualNorm; /// The current norm of the rotation residual double mLMNormalRatioTolerance; /// The ratio threshold for the norm of the normal LM residual double mLMNormalAbsTolerance; /// The absolute value threshold for the norm of the normal LM residual double mLMNormalInitialResidualNorm; /// The reference norm of the normal LM residual double mLMNormalCurrentResidualNorm; /// The current norm of the normal LM residual double mLMTangentStickRatioTolerance; /// The ratio threshold for the norm of the tangent LM residual (stick) double mLMTangentStickAbsTolerance; /// The absolute value threshold for the norm of the tangent LM residual (stick) double mLMTangentSlipRatioTolerance; /// The ratio threshold for the norm of the tangent LM residual (slip) double mLMTangentSlipAbsTolerance; /// The absolute value threshold for the norm of the tangent LM residual (slip) double mLMTangentStickInitialResidualNorm; /// The reference norm of the tangent LM residual (stick) double mLMTangentStickCurrentResidualNorm; /// The current norm of the tangent LM residual (stick) double mLMTangentSlipInitialResidualNorm; /// The reference norm of the tangent LM residual (slip) double mLMTangentSlipCurrentResidualNorm; /// The current norm of the tangent LM residual (slip) std::size_t mStickCounter = 0; /// This is an auxiliar counter for stick dofs std::size_t mSlipCounter = 0; /// This is an auxiliar counter for slip dofs double mNormalTangentRatio; /// The ratio to accept a non converged tangent component in case std::vector<int> mActiveDofs; /// This vector contains the dofs that are active ///@} 
///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@} ///@name Serialization ///@{ ///@name Private Inquiry ///@{ ///@} ///@name Unaccessible methods ///@{ ///@} }; // Kratos DisplacementLagrangeMultiplierResidualFrictionalContactCriteria ///@name Local flags creation ///@{ /// Local Flags template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualFrictionalContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualFrictionalContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualFrictionalContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualFrictionalContactCriteria<TSparseSpace, TDenseSpace>::ROTATION_DOF_IS_CONSIDERED(Kratos::Flags::Create(3)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualFrictionalContactCriteria<TSparseSpace, TDenseSpace>::PURE_SLIP(Kratos::Flags::Create(4)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualFrictionalContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(5)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualFrictionalContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_NORMAL_RESIDUAL_IS_SET(Kratos::Flags::Create(6)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualFrictionalContactCriteria<TSparseSpace, 
TDenseSpace>::INITIAL_STICK_RESIDUAL_IS_SET(Kratos::Flags::Create(7)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualFrictionalContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_SLIP_RESIDUAL_IS_SET(Kratos::Flags::Create(8)); } #endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_FRICTIONAL_CONTACT_CRITERIA_H */
GB_memcpy.c
//------------------------------------------------------------------------------ // GB_memcpy: parallel memcpy //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Note that this function uses its own hard-coded chunk size. #include "GB.h" #define GB_MEM_CHUNK (1024*1024) void GB_memcpy // parallel memcpy ( void *dest, // destination const void *src, // source size_t n, // # of bytes to copy int nthreads // max # of threads to use ) { if (nthreads <= 1 || n <= GB_MEM_CHUNK) { //---------------------------------------------------------------------- // memcpy using a single thread //---------------------------------------------------------------------- memcpy (dest, src, n) ; } else { //---------------------------------------------------------------------- // memcpy using multiple threads //---------------------------------------------------------------------- size_t nchunks = 1 + (n / GB_MEM_CHUNK) ; if (((size_t) nthreads) > nchunks) { nthreads = (int) nchunks ; } GB_void *pdest = (GB_void *) dest ; const GB_void *psrc = (GB_void *) src ; int64_t k ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (k = 0 ; k < nchunks ; k++) { size_t start = k * GB_MEM_CHUNK ; if (start < n) { size_t chunk = GB_IMIN (n - start, GB_MEM_CHUNK) ; memcpy (pdest + start, psrc + start, chunk) ; } } } }
DenseVector.h
//================================================================================================= /*! // \file blaze/math/smp/openmp/DenseVector.h // \brief Header file for the OpenMP-based dense vector SMP implementation // // Copyright (C) 2012-2019 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
*/ //================================================================================================= #ifndef _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_ #define _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_ //************************************************************************************************* // Includes //************************************************************************************************* #include <omp.h> #include <blaze/math/Aliases.h> #include <blaze/math/constraints/SMPAssignable.h> #include <blaze/math/expressions/DenseVector.h> #include <blaze/math/expressions/SparseVector.h> #include <blaze/math/simd/SIMDTrait.h> #include <blaze/math/smp/ParallelSection.h> #include <blaze/math/smp/SerialSection.h> #include <blaze/math/typetraits/IsDenseVector.h> #include <blaze/math/typetraits/IsSIMDCombinable.h> #include <blaze/math/typetraits/IsSMPAssignable.h> #include <blaze/math/views/Subvector.h> #include <blaze/system/SMP.h> #include <blaze/util/algorithms/Min.h> #include <blaze/util/Assert.h> #include <blaze/util/EnableIf.h> #include <blaze/util/FunctionTrace.h> #include <blaze/util/StaticAssert.h> #include <blaze/util/Types.h> namespace blaze { //================================================================================================= // // OPENMP-BASED ASSIGNMENT KERNELS // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP (compound) assignment of a dense vector to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side dense vector to be assigned. // \param op The (compound) assignment operation. 
// \return void // // This function is the backend implementation of the OpenMP-based SMP assignment of a dense // vector to a dense vector.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side dense vector , bool TF2 // Transpose flag of the right-hand side dense vector , typename OP > // Type of the assignment operation void openmpAssign( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs, OP op ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); using ET1 = ElementType_t<VT1>; using ET2 = ElementType_t<VT2>; constexpr bool simdEnabled( VT1::simdEnabled && VT2::simdEnabled && IsSIMDCombinable_v<ET1,ET2> ); constexpr size_t SIMDSIZE( SIMDTrait< ElementType_t<VT1> >::size ); const bool lhsAligned( (~lhs).isAligned() ); const bool rhsAligned( (~rhs).isAligned() ); const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 
1UL : 0UL ); const size_t equalShare ( (~lhs).size() / threads + addon ); const size_t rest ( equalShare & ( SIMDSIZE - 1UL ) ); const size_t sizePerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t index( i*sizePerThread ); if( index >= (~lhs).size() ) continue; const size_t size( min( sizePerThread, (~lhs).size() - index ) ); if( simdEnabled && lhsAligned && rhsAligned ) { auto target( subvector<aligned>( ~lhs, index, size, unchecked ) ); const auto source( subvector<aligned>( ~rhs, index, size, unchecked ) ); op( target, source ); } else if( simdEnabled && lhsAligned ) { auto target( subvector<aligned>( ~lhs, index, size, unchecked ) ); const auto source( subvector<unaligned>( ~rhs, index, size, unchecked ) ); op( target, source ); } else if( simdEnabled && rhsAligned ) { auto target( subvector<unaligned>( ~lhs, index, size, unchecked ) ); const auto source( subvector<aligned>( ~rhs, index, size, unchecked ) ); op( target, source ); } else { auto target( subvector<unaligned>( ~lhs, index, size, unchecked ) ); const auto source( subvector<unaligned>( ~rhs, index, size, unchecked ) ); op( target, source ); } } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP (compound) assignment of a sparse vector to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be assigned. // \param op The (compound) assignment operation. // \return void // // This function is the backend implementation of the OpenMP-based SMP assignment of a sparse // vector to a dense vector.\n // This function must \b NOT be called explicitly! 
It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side sparse vector , bool TF2 // Transpose flag of the right-hand side sparse vector , typename OP > // Type of the assignment operation void openmpAssign( DenseVector<VT1,TF1>& lhs, const SparseVector<VT2,TF2>& rhs, OP op ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL ); const size_t sizePerThread( (~lhs).size() / threads + addon ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t index( i*sizePerThread ); if( index >= (~lhs).size() ) continue; const size_t size( min( sizePerThread, (~lhs).size() - index ) ); auto target( subvector<unaligned>( ~lhs, index, size, unchecked ) ); const auto source( subvector<unaligned>( ~rhs, index, size, unchecked ) ); op( target, source ); } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // PLAIN ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. 
// \param rhs The right-hand side vector to be assigned. // \return void // // This function implements the default OpenMP-based SMP assignment to a dense vector. Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); assign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be assigned. // \return void // // This function performs the OpenMP-based SMP assignment to a dense vector. 
Due to the // explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { assign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, []( auto& a, const auto& b ){ assign( a, b ); } ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // ADDITION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense vector. 
// \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be added. // \return void // // This function implements the default OpenMP-based SMP addition assignment to a dense vector. // Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); addAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be added. // \return void // // This function implements the OpenMP-based SMP addition assignment to a dense vector. 
Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { addAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, []( auto& a, const auto& b ){ addAssign( a, b ); } ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // SUBTRACTION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP subtraction assignment to a dense vector. 
// \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be subtracted. // \return void // // This function implements the default OpenMP-based SMP subtraction assignment of a vector to // a dense vector. Due to the explicit application of the SFINAE principle, this function can // only be selected by the compiler in case both operands are SMP-assignable and the element // types of both operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); subAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP subtraction assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be subtracted. // \return void // // This function implements the OpenMP-based SMP subtraction assignment to a dense vector. 
Due // to the explicit application of the SFINAE principle, this function can only be selected by // the compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { subAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, []( auto& a, const auto& b ){ subAssign( a, b ); } ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // MULTIPLICATION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! 
\cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be multiplied. // \return void // // This function implements the default OpenMP-based SMP multiplication assignment to a dense // vector. Due to the explicit application of the SFINAE principle, this function can only be // selected by the compiler in case both operands are SMP-assignable and the element types of // both operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); multAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP multiplication assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side dense vector to be multiplied. 
// \return void // // This function implements the OpenMP-based SMP multiplication assignment to a dense vector. // Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both // operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { multAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, []( auto& a, const auto& b ){ multAssign( a, b ); } ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // DIVISION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! 
\cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP division assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector divisor. // \return void // // This function implements the default OpenMP-based SMP division assignment to a dense vector. // Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both // operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpDivAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); divAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP division assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side dense vector divisor. 
// \return void // // This function implements the OpenMP-based SMP division assignment to a dense vector. Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpDivAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { divAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, []( auto& a, const auto& b ){ divAssign( a, b ); } ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // COMPILE TIME CONSTRAINTS // //================================================================================================= //************************************************************************************************* /*! 
\cond BLAZE_INTERNAL */
namespace {

// Compile-time guard: this translation unit must only be compiled when the
// OpenMP parallelization mode is enabled.
BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE );

}
/*! \endcond */
//*************************************************************************************************

} // namespace blaze

#endif
GB_binop__gt_uint16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__gt_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_08__gt_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_02__gt_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_04__gt_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__gt_uint16)
// A*D function (colscale):         GB (_AxD__gt_uint16)
// D*A function (rowscale):         GB (_DxB__gt_uint16)
// C+=B function (dense accum):     GB (_Cdense_accumB__gt_uint16)
// C+=b function (dense accum):     GB (_Cdense_accumb__gt_uint16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__gt_uint16)
// C=scalar+B                       GB (_bind1st__gt_uint16)
// C=scalar+B'                      GB (_bind1st_tran__gt_uint16)
// C=A+scalar                       GB (_bind2nd__gt_uint16)
// C=A'+scalar                      GB (_bind2nd_tran__gt_uint16)

// C type:   bool
// A type:   uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij > bij)

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint16_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x > y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_GT || GxB_NO_UINT16 || GxB_NO_GT_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__gt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__gt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // no accum variant exists for the GT operator; this is a no-op stub
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__gt_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // no accum variant exists for the GT operator; this is a no-op stub
    #if 0
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__gt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__gt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__gt_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__gt_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__gt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__gt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__gt_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__gt_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x > bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__gt_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij > y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                    \
{                                            \
    uint16_t aij = GBX (Ax, pA, false) ;     \
    Cx [pC] = (x > aij) ;                    \
}

GrB_Info GB (_bind1st_tran__gt_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                    \
{                                            \
    uint16_t aij = GBX (Ax, pA, false) ;     \
    Cx [pC] = (aij > y) ;                    \
}

GrB_Info GB (_bind2nd_tran__gt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
NDArray.h
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ #ifndef NDARRAY_H #define NDARRAY_H #include <initializer_list> #include <functional> #include <shape.h> #include "NativeOpExcutioner.h" #include <memory/Workspace.h> #include <indexing/NDIndex.h> #include <indexing/IndicesList.h> #include <graph/Intervals.h> #include <array/DataType.h> #include <stdint.h> #include <array/ArrayOptions.h> #include <array/ArrayType.h> #include <array/ResultSet.h> namespace nd4j { template<typename T> class ND4J_EXPORT NDArray; ND4J_EXPORT NDArray<float> operator-(const float, const NDArray<float>&); ND4J_EXPORT NDArray<float16> operator-(const float16, const NDArray<float16>&); ND4J_EXPORT NDArray<double> operator-(const double, const NDArray<double>&); ND4J_EXPORT NDArray<float> operator+(const float, const NDArray<float>&); ND4J_EXPORT NDArray<float16> operator+(const float16, const NDArray<float16>&); ND4J_EXPORT NDArray<double> operator+(const double, const NDArray<double>&); template<typename T> NDArray<T> mmul(const NDArray<T>&, const NDArray<T>&); template<typename T> class NDArray { protected: /** * if true then array doesn't own buffer and simply points to another's buffer */ bool _isView = false; /** * pointer on flattened data array in memory */ T *_buffer = nullptr; /** * contains shape 
info: matrix rank, numbers of elements per each dimension, dimensions strides, element-wise-stride, c-like or fortan-like order */ Nd4jLong *_shapeInfo = nullptr; /** * pointer on externally allocated memory where _buffer and _shapeInfo are stored */ nd4j::memory::Workspace* _workspace = nullptr; /** * alternative buffers for special computational devices (like GPUs for CUDA) */ T* _bufferD = nullptr; Nd4jLong *_shapeInfoD = nullptr; /** * indicates whether user allocates memory for _buffer/_shapeInfo by himself, in opposite case the memory must be allocated from outside */ bool _isShapeAlloc = false; bool _isBuffAlloc = false; /** * Field to store cached length */ Nd4jLong _length = -1L; /** * type of array elements */ DataType _dataType = DataType_FLOAT; std::string toStringValue(T value); public: static NDArray<T>* createEmpty(nd4j::memory::Workspace* workspace = nullptr); static NDArray<T>* valueOf(const std::initializer_list<Nd4jLong>& shape, const T value, const char order = 'c'); static NDArray<T>* valueOf(const std::vector<Nd4jLong>& shape, const T value, const char order = 'c'); static NDArray<T>* linspace(const T from, const T to, const Nd4jLong numElements); static NDArray<T>* scalar(const T value); /** * default constructor, do not allocate memory, memory for array is passed from outside */ NDArray(T *buffer = nullptr, Nd4jLong* shapeInfo = nullptr, nd4j::memory::Workspace* workspace = nullptr); NDArray(std::initializer_list<Nd4jLong> shape, nd4j::memory::Workspace* workspace = nullptr); /** * Constructor for scalar NDArray */ NDArray(T scalar); /** * copy constructor */ NDArray(const NDArray<T>& other); /** * move constructor */ NDArray(NDArray<T>&& other) noexcept; #ifndef __JAVACPP_HACK__ // this method only available out of javacpp /** * This constructor creates vector of T * * @param values */ NDArray(std::initializer_list<T> values, nd4j::memory::Workspace* workspace = nullptr); NDArray(std::vector<T> &values, nd4j::memory::Workspace* workspace = 
nullptr); #endif /** * constructor, create empty array stored at given workspace */ NDArray(nd4j::memory::Workspace* workspace); /** * this constructor creates new NDArray with shape matching "other" array, do not copy "other" elements into new array */ NDArray(const NDArray<T> *other, const bool copyStrides = false, nd4j::memory::Workspace* workspace = nullptr); /** * constructor creates new NDArray using shape information from "shapeInfo", set all elements in new array to be zeros, if copyStrides is true then use stride values from "shapeInfo", else calculate strides independently */ NDArray(const Nd4jLong* shapeInfo, const bool copyStrides = false, nd4j::memory::Workspace* workspace = nullptr); /** * this constructor creates new array using shape information contained in vector argument */ NDArray(const char order, const std::vector<Nd4jLong> &shape, nd4j::memory::Workspace* workspace = nullptr); /** * This constructor creates new array with elements copied from data and using shape information stored in shape * * PLEASE NOTE: data will be copied AS IS, without respect to specified order. You must ensure order match here. 
*/ NDArray(const char order, const std::vector<Nd4jLong> &shape, const std::vector<T> &data, nd4j::memory::Workspace* workspace = nullptr); /** * this constructor creates new array using given buffer (without memory allocating) and shape information stored in shape */ NDArray(T *buffer, const char order, const std::vector<Nd4jLong> &shape , nd4j::memory::Workspace* workspace = nullptr); /** * copy assignment operator */ NDArray<T>& operator=(const NDArray<T>& other); /** * move assignment operator */ NDArray<T>& operator=(NDArray<T>&& other) noexcept; /** * assignment operator, assigns the same scalar to all array elements */ NDArray<T>& operator=(const T scalar); /** * operators for memory allocation and deletion */ void* operator new(size_t i); void operator delete(void* p); /** * method replaces existing buffer/shapeinfo, AND releases original pointers (if releaseExisting TRUE) */ void replacePointers(T *buffer, Nd4jLong *shapeInfo, const bool releaseExisting = true); /** * create a new array by replicating current array by repeats times along given dimension * dimension - dimension along which to repeat elements * repeats - number of repetitions */ NDArray<T>* repeat(int dimension, const std::vector<Nd4jLong>& repeats) const; /** * fill target array by repeating current array * dimension - dimension along which to repeat elements */ void repeat(int dimension, NDArray<T>& target) const; /** * return _dataType; */ DataType dataType() const; /** * creates array which is view of this array */ NDArray<T>* getView(); /** * creates array which points on certain sub-range of this array, sub-range is defined by given indices */ NDArray<T> *subarray(IndicesList& indices) const; NDArray<T> *subarray(IndicesList& indices, std::vector<Nd4jLong>& strides) const; NDArray<T>* subarray(const std::initializer_list<NDIndex*>& idx) const; NDArray<T>* subarray(const Intervals& idx) const; /** * cast array elements to given dtype */ NDArray<T>* cast(DataType dtype); void 
cast(NDArray<T>* target, DataType dtype); /** * returns _workspace */ nd4j::memory::Workspace* getWorkspace() const { return _workspace; } /** * returns _buffer */ T* getBuffer() const; T* buffer(); /** * returns _shapeInfo */ Nd4jLong* shapeInfo(); Nd4jLong* getShapeInfo() const; /** * if _bufferD==nullptr return _buffer, else return _bufferD */ T* specialBuffer(); /** * Returns True if it's legally empty NDArray, or false otherwise * @return */ FORCEINLINE bool isEmpty() const; /** * if _shapeInfoD==nullptr return _shapeInfo, else return _shapeInfoD */ Nd4jLong* specialShapeInfo(); /** * set values for _bufferD and _shapeInfoD */ void setSpecialBuffers(T * buffer, Nd4jLong *shape); /** * permutes (in-place) the dimensions in array according to "dimensions" array */ bool permutei(const std::initializer_list<int>& dimensions); bool permutei(const std::vector<int>& dimensions); bool permutei(const int* dimensions, const int rank); bool permutei(const std::initializer_list<Nd4jLong>& dimensions); bool permutei(const std::vector<Nd4jLong>& dimensions); bool permutei(const Nd4jLong* dimensions, const int rank); bool isFinite(); bool hasNaNs(); bool hasInfs(); /** * permutes the dimensions in array according to "dimensions" array, new array points on _buffer of this array */ NDArray<T>* permute(const std::initializer_list<int>& dimensions) const; NDArray<T>* permute(const std::vector<int>& dimensions) const; NDArray<T>* permute(const int* dimensions, const int rank) const; void permute(const int* dimensions, const int rank, NDArray<T>& target) const; void permute(const std::vector<int>& dimensions, NDArray<T>& target) const; NDArray<T>* permute(const std::initializer_list<Nd4jLong>& dimensions) const; NDArray<T>* permute(const std::vector<Nd4jLong>& dimensions) const; NDArray<T>* permute(const Nd4jLong* dimensions, const int rank) const; void permute(const Nd4jLong* dimensions, const int rank, NDArray<T>& target) const; void permute(const std::vector<Nd4jLong>& 
dimensions, NDArray<T>& target) const; /** * This method streamlines given view or permuted array, and reallocates buffer */ void streamline(char order = 'a'); /** * check whether array is contiguous in memory */ bool isContiguous(); /** * prints information about array shape * msg - message to print out */ void printShapeInfo(const char * msg = nullptr) const; /** * prints buffer elements * msg - message to print out * limit - number of array elements to print out */ void printBuffer(const char* msg = nullptr, Nd4jLong limit = -1); /** * prints buffer elements, takes into account offset between elements (element-wise-stride) * msg - message to print out * limit - number of array elements to print out */ void printIndexedBuffer(const char* msg = nullptr, Nd4jLong limit = -1) const; std::string asIndexedString(Nd4jLong limit = -1); std::string asString(Nd4jLong limit = -1); /** * this method assigns values of given array to this one */ void assign(const NDArray<T>* other); /** * this method assigns values of given array to this one */ void assign(const NDArray<T>& other); /** * this method assigns given value to all elements in array */ void assign(const T value); /** * returns new copy of this array, optionally in different order */ NDArray<T> *dup(const char newOrder = 'a'); /** * returns sum of all elements of array */ T sumNumber() const; /** * returns mean number of array */ T meanNumber() const; /** * This method explicitly enforces new shape for this NDArray, old shape/stride information is lost */ void enforce(const std::initializer_list<Nd4jLong> &dimensions, char order = 'a'); void enforce(std::vector<Nd4jLong> &dimensions, char order = 'a'); /** * calculates sum along dimension(s) in this array and save it to created reduced array * dimensions - array of dimensions to calculate sum over * keepDims - if true then put unities in place of reduced dimensions */ NDArray<T> *sum(const std::vector<int> &dimensions) const; /** * method reduces array by excluding 
its shapes along dimensions present in given dimensions vector, result is stored in new array to be returned * dimensions - array of dimensions to reduce along * keepDims - if true then put unities in place of reduced dimensions */ template<typename OpName> NDArray<T>* reduceAlongDimension(const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const; template<typename OpName> NDArray<T>* reduceAlongDimension(const std::initializer_list<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const; template<typename OpName> NDArray<T> reduceAlongDims(const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const; /** * method reduces array by excluding its shapes along dimensions present in given dimensions vector * target - where to save result of reducing * dimensions - array of dimensions to reduce along * keepDims - if true then put unities in place of reduced dimensions * extras - extra parameters */ template<typename OpName> void reduceAlongDimension(NDArray<T>* target, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false, T *extras = nullptr) const; /** * return variance of array elements set * biasCorrected - if true bias correction will be applied */ template<typename OpName> T varianceNumber(bool biasCorrected = true); /** * apply scalar operation to array * extraParams - extra parameters for operation */ template<typename OpName> T reduceNumber(T *extraParams = nullptr) const; /** * returns element index which corresponds to some condition imposed by operation * extraParams - extra parameters for operation */ template<typename OpName> Nd4jLong indexReduceNumber(T *extraParams = nullptr); /** * returns index of max element in a given array (optionally: along given dimension(s)) * dimensions - optional vector with dimensions */ Nd4jLong argMax(std::initializer_list<int> dimensions = {}); /** 
* apply OpName transformation directly to array * extraParams - extra parameters for operation */ template<typename OpName> void applyTransform(T *extraParams = nullptr); /** * apply OpName transformation to array and store result in target * target - where to store result * extraParams - extra parameters for operation */ template<typename OpName> void applyTransform(NDArray<T> *target, T *extraParams = nullptr); /** * apply OpName transformation to this array and store result in new array being returned * extraParams - extra parameters for operation */ template<typename OpName> NDArray<T> transform(T *extraParams = nullptr) const; /** * apply pairwise OpName transformation based on "this" and "other" arras elements, store result in this array * other - second array necessary for pairwise operation * extraParams - extra parameters for operation */ template<typename OpName> void applyPairwiseTransform(NDArray<T> *other, T *extraParams); /** * apply pairwise OpName transformation based on "this" and "other" arras elements, store result in target array * other - second array necessary for pairwise operation * target - where to store result * extraParams - extra parameters for operation */ template<typename OpName> void applyPairwiseTransform(NDArray<T> *other, NDArray<T> *target, T *extraParams); /** * apply operation which requires broadcasting, broadcast a smaller array (tad) along bigger one (this) * tad - array to broadcast * dimensions - dimensions array to broadcast along * target - where to store result * extraParams - extra parameters for operation */ template<typename OpName> void applyBroadcast(std::initializer_list<int> dimensions, const NDArray<T>* tad, NDArray<T>* target = nullptr, T* extraArgs = nullptr); template <typename OpName> void applyBroadcast(std::vector<int> &dimensions, const NDArray<T> *tad, NDArray<T> *target = nullptr, T *extraArgs = nullptr); /** * apply operation which requires broadcasting, broadcast one tensor along another, also this 
method checks the possibility of broadcasting * other - input array * extraParams - extra parameters for operation */ template <typename OpName> NDArray<T> applyTrueBroadcast(const NDArray<T>& other, T *extraArgs = nullptr) const; template <typename OpName> NDArray<T>* applyTrueBroadcast(const NDArray<T>* other, T *extraArgs = nullptr) const; /** * apply operation which requires broadcasting, broadcast one tensor along another, also this method checks the possibility of broadcasting * other - input array * target - where to store result * checkTargetShape - if true check whether target shape is suitable for broadcasting * extraParams - extra parameters for operation */ template <typename OpName> void applyTrueBroadcast(const NDArray<T>* other, NDArray<T>* target, const bool checkTargetShape = true, T *extraArgs = nullptr) const; /** * apply a scalar operation to an array * scalar - input scalar * target - where to store result * extraParams - extra parameters for operation */ template<typename OpName> void applyScalar(T scalar, NDArray<T>* target = nullptr, T *extraParams = nullptr) const; /** * apply a scalar operation to an array * scalar - input array which is simple scalar * target - where to store result * extraParams - extra parameters for operation */ template<typename OpName> void applyScalar(NDArray<T>& scalar, NDArray<T>* target = nullptr, T *extraParams = nullptr) const; #ifndef __JAVACPP_HACK__ /** * apply operation "func" to an array * func - what operation to apply * target - where to store result */ void applyLambda(const std::function<T(T)>& func, NDArray<T>* target = nullptr); void applyIndexedLambda(const std::function<T(Nd4jLong, T)>& func, NDArray<T>* target = nullptr); /** * apply pairwise operation "func" to an array * other - input array * func - what pairwise operation to apply * target - where to store result */ void applyPairwiseLambda(const NDArray<T>* other, const std::function<T(T, T)>& func, NDArray<T>* target = nullptr); void 
applyIndexedPairwiseLambda(NDArray<T>* other, const std::function<T(Nd4jLong, T, T)>& func, NDArray<T>* target = nullptr); void applyTriplewiseLambda(NDArray<T>* second, NDArray<T> *third, const std::function<T(T, T, T)>& func, NDArray<T>* target = nullptr); #endif /** * apply OpName random operation to array * buffer - pointer on RandomBuffer * y - optional input array * z - optional input array * extraArgs - extra parameters for operation */ template<typename OpName> void applyRandom(nd4j::random::RandomBuffer *buffer, NDArray<T>* y = nullptr, NDArray<T>* z = nullptr, T* extraArgs = nullptr); /** * apply transpose operation to the copy of this array, that is this array remains unaffected */ NDArray<T>* transpose() const; NDArray<T> transp() const; /** * perform transpose operation and store result in target, this array remains unaffected * target - where to store result */ void transpose(NDArray<T>& target) const; /** * apply in-place transpose operation to this array, so this array becomes transposed */ void transposei(); /** * return array pointing on certain range of this array * index - the number of array to be returned among set of possible arrays * dimensions - array of dimensions to point on */ NDArray<T>* tensorAlongDimension(Nd4jLong index, const std::initializer_list<int>& dimensions) const; NDArray<T>* tensorAlongDimension(Nd4jLong index, const std::vector<int>& dimensions) const; /** * returns the number of arrays pointing on specified dimension(s) * dimensions - array of dimensions to point on */ Nd4jLong tensorsAlongDimension(const std::initializer_list<int> dimensions) const ; Nd4jLong tensorsAlongDimension(const std::vector<int>& dimensions) const ; /** * returns true if elements of two arrays are equal to within given epsilon value * other - input array to compare * eps - epsilon, this value defines the precision of elements comparison */ bool equalsTo(const NDArray<T> *other, T eps = (T) 1e-5f) const; bool equalsTo(NDArray<T> &other, T eps = 
(T) 1e-5f) const; /** * add given row vector to all rows of this array * row - row vector to add */ void addiRowVector(const NDArray<T> *row); /** * add given row vector to all rows of this array, store result in target * row - row vector to add * target - where to store result */ void addRowVector(const NDArray<T> *row, NDArray<T>* target) const; /** * subtract given row vector from all rows of this array, store result in target * row - row vector to subtract * target - where to store result */ void subRowVector(const NDArray<T> *row, NDArray<T>* target) const; /** * multiply all rows of this array on given row vector, store result in target * row - row vector to multiply on * target - where to store result */ void mulRowVector(const NDArray<T> *row, NDArray<T>* target) const; /** * divide all rows of this array on given row vector, store result in target * row - row vector to divide on * target - where to store result */ void divRowVector(const NDArray<T> *row, NDArray<T>* target) const; /** * add given column vector to all columns of this array, store result in target * column - column vector to add * target - where to store result */ void addColumnVector(const NDArray<T> *column, NDArray<T>* target) const; /** * add given column vector to all columns of this array, this array becomes affected (in-place operation) * column - column vector to add */ void addiColumnVector(const NDArray<T> *column); /** * multiply all columns of this array on given column vector, this array becomes affected (in-place operation) * column - column vector to multiply on */ void muliColumnVector(const NDArray<T> *column); /** * returns number of bytes used by _buffer & _shapeInfo */ Nd4jLong memoryFootprint(); /** * these methods suited for FlatBuffers use */ std::vector<T> getBufferAsVector(); std::vector<Nd4jLong> getShapeAsVector(); std::vector<Nd4jLong> getShapeInfoAsVector(); std::vector<int64_t> getShapeInfoAsFlatVector(); /** * set new order and shape in case of suitable array 
length (in-place operation) * order - order to set * shape - shape to set * * if there was permute applied before or there are weird strides, then new buffer is allocated for array */ bool reshapei(const char order, const std::initializer_list<Nd4jLong>& shape); bool reshapei(const char order, const std::vector<Nd4jLong>& shape); bool reshapei(const std::initializer_list<Nd4jLong>& shape); bool reshapei(const std::vector<Nd4jLong>& shape); /** * creates new array with corresponding order and shape, new array will point on _buffer of this array * order - order to set * shape - shape to set * * if permute have been applied before or there are weird strides, then new buffer is allocated for new array */ NDArray<T>* reshape(const char order, const std::vector<Nd4jLong>& shape) const; /** * calculate strides and set given order * order - order to set */ void updateStrides(const char order); /** * change an array by repeating it the number of times given by reps (in-place operation) * repeats - contains numbers of repetitions */ void tilei(const std::vector<Nd4jLong>& repeats); /** * returns new array which is created by by repeating of this array the number of times given by reps * repeats - contains numbers of repetitions */ NDArray<T> tile(const std::vector<Nd4jLong>& repeats) const; /** * change an array by repeating it the number of times given by reps (in-place operation) * repeats - contains numbers of repetitions * target - where to store result */ void tile(const std::vector<Nd4jLong>& repeats, NDArray<T>& target) const; /** * change an array by repeating it the number of times to acquire the new shape which is the same as target shape * target - where to store result */ void tile(NDArray<T>& target) const; /** * returns an array which is result of broadcasting of this and other arrays * other - input array */ NDArray<T>* broadcast(const NDArray<T>& other); /** * check whether array's rows (arg=0) or columns (arg=1) create orthogonal basis * arg - 0 -> row, 1 -> 
column */ bool hasOrthonormalBasis(const int arg); /** * check whether array is identity matrix */ bool isIdentityMatrix(); /** * check whether array is unitary matrix */ bool isUnitary(); /** * reduces dimensions in this array relying on index operation OpName * dimensions - vector of dimensions to reduce along * extraArgs - extra parameters for operation */ template<typename OpName> NDArray<T>* applyIndexReduce(const std::vector<int>& dimensions, const T *extraParams = nullptr) const; /** * reduces dimensions in array relying on index operation OpName * target - where to store result * dimensions - vector of dimensions to reduce along * extraArgs - extra parameters for operation */ template<typename OpName> void applyIndexReduce(const NDArray<T>* target, const std::vector<int>& dimensions, const T *extraParams = nullptr) const; /** * apply reduce3 operation OpName to this and other array, return result in new output array * other - input array * extraArgs - extra parameters for operation */ template<typename OpName> NDArray<T>* applyReduce3(const NDArray<T>* other, const T* extraParams = nullptr) const; /** * apply reduce3 operation OpName to this and other array, return result in new output array * other - input array * dimensions - vector of dimensions to reduce along (tads not axis) * extraArgs - extra parameters for operation */ template<typename OpName> NDArray<T>* applyAllReduce3(const NDArray<T>* other, const std::vector<int>& dimensions, const T* extraParams = nullptr) const; /** * apply reduce3 (exec) operation OpName to this and other array, return result in new output array * other - input array * dimensions - vector of dimensions to reduce along (same as reduceAlongDimension) * extraArgs - extra parameters for operation */ template<typename OpName> NDArray<T>* applyReduce3(const NDArray<T>* other, const std::vector<int>& dimensions, const T* extraParams = nullptr) const; /** * returns variance along given dimensions * biasCorrected - if true bias 
correction will be applied * dimensions - vector of dimensions to calculate variance along */ template<typename OpName> NDArray<T>* varianceAlongDimension(const bool biasCorrected, const std::vector<int>& dimensions) const; template<typename OpName> NDArray<T>* varianceAlongDimension(const bool biasCorrected, const std::initializer_list<int>& dimensions) const; template<typename OpName> void varianceAlongDimension(const NDArray<T>* target, const bool biasCorrected, const std::vector<int>& dimensions); template<typename OpName> void varianceAlongDimension(const NDArray<T>* target, const bool biasCorrected, const std::initializer_list<int>& dimensions); /** * operator returns sub-array with buffer pointing at this->_buffer with offset defined by given intervals * idx - intervals of indexes which define the sub-arrays to point on * keepUnitiesInShape - if false then eliminate unities from resulting array shape, for example {1,a,1,b} -> {a,b} */ NDArray<T> operator()(const Intervals& idx, bool keepUnitiesInShape = false) const; /** * operator returns sub-array with buffer pointing at this->_buffer with offset defined by given intervals * idx - intervals of indexes which define the sub-arrays to point on, idx has form {dim0Start,dim0End, dim1Start,dim1End, ....} and length (2 * this->rankOf()) * when (dimStart == dimEnd) then whole range will be used for current dimension * keepUnitiesInShape - if false then eliminate unities from resulting array shape, for example {1,a,1,b} -> {a,b} */ NDArray<T> operator()(const Nd4jLong* idx, bool keepUnitiesInShape = false) const; /** * addition operator: array + other * other - input array to add */ NDArray<T> operator+(const NDArray<T>& other) const; /** * addition operator: array + scalar * scalar - input scalar to add */ NDArray<T> operator+(const T scalar) const; /** * friend functions which implement addition operator: scalar + array * scalar - input scalar to add */ friend NDArray<float> nd4j::operator+(const float scalar, 
const NDArray<float>& arr); friend NDArray<float16> nd4j::operator+(const float16 scalar, const NDArray<float16>& arr); friend NDArray<double> nd4j::operator+(const double scalar, const NDArray<double>& arr); /** * addition unary operator array += other * other - input array to add */ void operator+=(const NDArray<T>& other); /** * subtraction unary operator array -= other * other - input array to add */ void operator-=(const NDArray<T>& other); void operator+=(const T other); void operator-=(const T other); /** * subtraction operator: array - other * other - input array to subtract */ NDArray<T> operator-(const NDArray<T>& other) const; /** * subtraction operator: array - scalar * scalar - input scalar to subtract */ NDArray<T> operator-(const T& scalar) const; /** * negative operator, it changes sign of all array elements on opposite */ NDArray<T> operator-() const; /** * friend functions which implement subtraction operator: scalar - array * scalar - input scalar to subtract */ friend NDArray<float> nd4j::operator-(const float scalar, const NDArray<float>& arr); friend NDArray<float16> nd4j::operator-(const float16 scalar, const NDArray<float16>& arr); friend NDArray<double> nd4j::operator-(const double scalar, const NDArray<double>& arr); /** * pairwise multiplication operator: array * other * other - input array to multiply on */ NDArray<T> operator*(const NDArray<T>& other) const; /** * multiplication operator: array * scalar * scalar - input scalar to multiply on */ NDArray<T> operator*(const T scalar) const; /** * pairwise multiplication unary operator array *= other * other - input array to multiply on */ void operator*=(const NDArray<T>& other); /** * multiplication unary operator array *= scalar * scalar - input scalar to multiply on */ void operator*=(const T scalar); /** * pairwise division operator: array / other * other - input array to divide on */ NDArray<T> operator/(const NDArray<T>& other) const; /** * division operator: array / scalar * scalar 
- input scalar to divide each array element on */ NDArray<T> operator/(const T scalar) const; /** * pairwise division unary operator: array /= other * other - input array to divide on */ void operator/=(const NDArray<T>& other); /** * division unary operator: array /= scalar * scalar - input scalar to divide on */ void operator/=(const T scalar); /** * friend function which implements mathematical multiplication of two arrays * left - input array * right - input array */ friend NDArray<T> mmul<>(const NDArray<T>& left, const NDArray<T>& right); /** * this method assigns elements of other array to the sub-array of this array defined by given intervals * other - input array to assign elements from * idx - intervals of indexes which define the sub-array */ void assign(const NDArray<T>& other, const Intervals& idx); /** * return vector containing _buffer as flat binary array */ std::vector<int8_t> asByteVector(); /** * makes array to be identity matrix (not necessarily square), that is set all diagonal elements = 1, rest = 0 */ void setIdentity(); /** * swaps the contents of tow arrays, * PLEASE NOTE: method doesn't take into account the shapes of arrays, shapes may be different except one condition: arrays lengths must be the same */ void swapUnsafe(NDArray<T>& other); /** * return vector with buffer which points on corresponding diagonal elements of array * type - means of vector to be returned: column ('c') or row ('r') */ NDArray<T>* diagonal(const char type ) const; /** * fill matrix with given value starting from specified diagonal in given direction, works only with 2D matrix * * diag - diagonal starting from matrix is filled. * diag = 0 corresponds to main diagonal, * diag < 0 below main diagonal * diag > 0 above main diagonal * direction - in what direction to fill matrix. 
There are 2 possible directions: * 'u' - fill up, mathematically this corresponds to lower triangular matrix * 'l' - fill down, mathematically this corresponds to upper triangular matrix */ void setValueInDiagMatrix(const T& value, const int diag, const char direction); /** * change an array by repeating it the number of times in order to acquire new shape equal to the input shape * * shape - contains new shape to broadcast array to * target - optional argument, if target != nullptr the resulting array will be placed it target, in opposite case tile operation is done in place */ void tileToShape(const std::vector<Nd4jLong>& shape, NDArray<T>* target = nullptr); void tileToShape(const std::initializer_list<Nd4jLong>& shape, NDArray<T>* target = nullptr); template <typename N> NDArray<N>* asT(); /** * calculates the trace of an array, that is sum of elements on main diagonal = sum array[i, i, i, ...] */ T getTrace() const; /** * fill array linearly as follows: arr[0] = from, arr[1] = from+step, arr[2] = from+2*step, ... 
*/ void linspace(const T from, const T step = 1.0f); NDArray<T>* createUninitialized() const; ResultSet<T>* multipleTensorsAlongDimension(const std::vector<int>& indices, const std::vector<int>& dimensions) const; ResultSet<T>* allTensorsAlongDimension(const std::vector<int>& dimensions) const; ResultSet<T>* allTensorsAlongDimension(const std::initializer_list<int>& dimensions) const; ResultSet<T>* allExamples()const ; /** * default destructor */ ~NDArray() noexcept; /** * set _shapeInfo */ FORCEINLINE void setShapeInfo(Nd4jLong *shapeInfo); /** * set _buffer */ FORCEINLINE void setBuffer(T* buffer); /** * set _isBuffAlloc and _isShapeAlloc */ FORCEINLINE void triggerAllocationFlag(bool bufferAllocated, bool shapeAllocated); /** * returns the value of "dim" dimension */ Nd4jLong sizeAt(int dim) const; /** * returns order of array */ FORCEINLINE char ordering() const; /** * return _isView */ FORCEINLINE bool isView(); /** * returns shape portion of shapeInfo */ FORCEINLINE Nd4jLong* shapeOf() const; /** * returns strides portion of shapeInfo */ FORCEINLINE Nd4jLong* stridesOf() const; /** * returns rank of array */ FORCEINLINE int rankOf() const; /** * returns length of array */ FORCEINLINE Nd4jLong lengthOf() const; /** * returns number of rows in array */ FORCEINLINE Nd4jLong rows() const; /** * returns number of columns in array */ FORCEINLINE Nd4jLong columns() const; /** * returns size of array elements type */ FORCEINLINE int sizeOfT() const; /** * returns element-wise-stride */ FORCEINLINE Nd4jLong ews() const; // returns true if arrays have same shape FORCEINLINE bool isSameShape(const NDArray<T> *other) const; FORCEINLINE bool isSameShape(NDArray<T> &other) const; FORCEINLINE bool isSameShape(const std::initializer_list<Nd4jLong>& shape) const; FORCEINLINE bool isSameShape(const std::vector<Nd4jLong>& shape) const; /** * returns true if these two NDArrays have same rank, dimensions, strides, ews and order */ FORCEINLINE bool isSameShapeStrict(const 
NDArray<T> *other) const; /** * returns true if buffer && shapeInfo were defined (non nullptr) */ FORCEINLINE bool nonNull() const; /** * returns array element with given index from linear buffer * i - element index in array */ FORCEINLINE T getScalar(const Nd4jLong i) const; /** * returns array element with given index, takes into account offset between elements (element-wise-stride) * i - element index in array */ FORCEINLINE T getIndexedScalar(const Nd4jLong i) const; /** * returns element with given indexes from 2D array * i - number of row * j - number of column */ FORCEINLINE T getScalar(const Nd4jLong i, const Nd4jLong j) const; /** * returns element with given indexes from 3D array * i - height * j - width * k - depth */ FORCEINLINE T getScalar(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) const; /** * assigns given scalar to array element by given index, takes into account offset between elements (element-wise-stride) * i - element index in array * value - scalar value to assign */ FORCEINLINE void putIndexedScalar(const Nd4jLong i, const T value); /** * assigns given scalar to array element by given index, regards array buffer as linear * i - element index in array * value - scalar value to assign */ FORCEINLINE void putScalar(const Nd4jLong i, const T value); /** * assigns given scalar to 2D array element by given indexes * i - number of row * j - number of row * value - scalar value to assign */ FORCEINLINE void putScalar(const Nd4jLong i, const Nd4jLong j, const T value); /** * assigns given scalar to 3D array element by given indexes * i - height * j - width * k - depth * value - scalar value to assign */ FORCEINLINE void putScalar(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k, const T value); /** * returns true if array is 2D */ FORCEINLINE bool isMatrix() const; /** * returns true if array is vector */ FORCEINLINE bool isVector() const; /** * returns true if array is column vector */ FORCEINLINE bool isColumnVector() const; /** * 
returns true if array is row vector */ FORCEINLINE bool isRowVector() const; /** * returns true if array is scalar */ FORCEINLINE bool isScalar() const; /** * inline accessing operator for matrix, i - absolute index */ FORCEINLINE T operator()(const Nd4jLong i) const; /** * inline modifying operator for matrix, i - absolute index */ FORCEINLINE T& operator()(const Nd4jLong i); /** * inline accessing operator for 2D array, i - row, j - column */ FORCEINLINE T operator()(const Nd4jLong i, const Nd4jLong j) const; /** * inline modifying operator for 2D array, i - row, j - column */ FORCEINLINE T& operator()(const Nd4jLong i, const Nd4jLong j); /** * inline accessing operator for 3D array, i - height, j - width, k - depth */ FORCEINLINE T operator()(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) const; /** * inline modifying operator for 3D array, i - height, j - width, k - depth */ FORCEINLINE T& operator()(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k); /** * inline modifying operator for 4D array, i - height, j - width, k - depth */ FORCEINLINE T& operator()(const Nd4jLong t, const Nd4jLong u, const Nd4jLong v, const Nd4jLong w); /** * inline accessing operator for 4D array, i - height, j - width, k - depth */ FORCEINLINE T operator()(const Nd4jLong t, const Nd4jLong u, const Nd4jLong v, const Nd4jLong w) const; template <typename T2> FORCEINLINE std::vector<T2> asVectorT(); FORCEINLINE bool isAttached(); NDArray<T>* detach(); FORCEINLINE bool operator == (const NDArray<T> &other) const; }; ////////////////////////////////////////////////////////////////////////// ///// IMLEMENTATION OF INLINE METHODS ///// ////////////////////////////////////////////////////////////////////////// template <typename T> template <typename T2> std::vector<T2> NDArray<T>::asVectorT() { std::vector<T2> result(this->lengthOf()); #pragma omp parallel for simd for (int e = 0; e < this->lengthOf(); e++) result[e] = static_cast<T2>(this->getIndexedScalar(e)); return result; } 
template<typename T> bool NDArray<T>::isAttached() { return this->_workspace != nullptr; } ////////////////////////////////////////////////////////////////////////// template<typename T> void NDArray<T>::setShapeInfo(Nd4jLong *shapeInfo) { if(_isShapeAlloc && _workspace == nullptr) delete []_shapeInfo; _shapeInfo = shapeInfo; _isShapeAlloc = false; if (shapeInfo != nullptr) this->_length = shape::length(shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> void NDArray<T>::setBuffer(T* buffer) { if(_isBuffAlloc && _workspace == nullptr) delete []_buffer; _buffer = buffer; _isBuffAlloc = false; } ////////////////////////////////////////////////////////////////////////// template<typename T> void NDArray<T>::triggerAllocationFlag(bool bufferAllocated, bool shapeAllocated) { _isBuffAlloc = bufferAllocated; _isShapeAlloc = shapeAllocated; } ////////////////////////////////////////////////////////////////////////// template<typename T> char NDArray<T>::ordering() const { return shape::order(_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isView() { return _isView; } ////////////////////////////////////////////////////////////////////////// template<typename T> Nd4jLong* NDArray<T>::shapeOf() const { return shape::shapeOf(_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> Nd4jLong* NDArray<T>::stridesOf() const { return shape::stride(_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> int NDArray<T>::rankOf() const { if (isEmpty()) return 0; return shape::rank(_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> Nd4jLong NDArray<T>::lengthOf() const { return _length; } ////////////////////////////////////////////////////////////////////////// template<typename T> 
Nd4jLong NDArray<T>::rows() const { if (this->rankOf() == 1) return 1; if (this->rankOf() > 2) throw std::runtime_error("Array with rank > 2 can't have rows"); return shapeOf()[0]; } ////////////////////////////////////////////////////////////////////////// template<typename T> Nd4jLong NDArray<T>::columns() const { if (this->rankOf() == 1) return this->lengthOf(); if (this->rankOf() > 2) throw std::runtime_error("Array with rank > 2 can't have columns"); return shapeOf()[1]; } ////////////////////////////////////////////////////////////////////////// template<typename T> int NDArray<T>::sizeOfT() const { return sizeof(T); } ////////////////////////////////////////////////////////////////////////// template<typename T> Nd4jLong NDArray<T>::ews() const { if (this->isEmpty() || this->rankOf() == 0) return 1; return shape::elementWiseStride(_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::nonNull() const { if (isEmpty()) return true; return this->_buffer != nullptr && this->_shapeInfo != nullptr; } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isMatrix() const { if (isEmpty()) return false; return shape::isMatrix(this->_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isVector() const { if (isEmpty()) return false; return !isScalar() && shape::isVector(this->_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isColumnVector() const { if (isEmpty()) return false; return !isScalar() && shape::isColumnVector(this->_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isRowVector() const { if (isEmpty()) return false; // 1D edge case if (shape::rank(this->_shapeInfo) == 1) return true; return !isScalar() 
&& shape::isRowVector(this->_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isScalar() const { return shape::isScalar(this->_shapeInfo); } // accessing operator for matrix, i - absolute index template<typename T> T NDArray<T>::operator()(const Nd4jLong i) const { if (i >= shape::length(_shapeInfo)) throw std::invalid_argument("NDArray::operator(i): dinput index is out of array length !"); auto ews = shape::elementWiseStride(_shapeInfo); char order = ordering(); if(ews == 1 && order == 'c') return _buffer[i]; else if(ews > 1 && order == 'c') return _buffer[i*ews]; else { Nd4jLong idx[MAX_RANK]; shape::ind2subC(rankOf(), shapeOf(), i, idx); Nd4jLong offset = shape::getOffset(0, shapeOf(), stridesOf(), idx, rankOf()); return _buffer[offset]; } } ////////////////////////////////////////////////////////////////////////// // modifying operator for matrix, i - absolute index template<typename T> T& NDArray<T>::operator()(const Nd4jLong i) { if (i >= shape::length(_shapeInfo)) throw std::invalid_argument("NDArray::operator(i): input index is out of array length !"); auto ews = shape::elementWiseStride(_shapeInfo); auto order = ordering(); if(ews == 1 && order == 'c') return _buffer[i]; else if(ews > 1 && order == 'c') return _buffer[i*ews]; else { Nd4jLong idx[MAX_RANK]; shape::ind2subC(rankOf(), shapeOf(), i, idx); auto offset = shape::getOffset(0, shapeOf(), stridesOf(), idx, rankOf()); return _buffer[offset]; } } ////////////////////////////////////////////////////////////////////////// // accessing operator for 2D matrix, i - row, j - column template<typename T> T NDArray<T>::operator()(const Nd4jLong i, const Nd4jLong j) const { if (rankOf() != 2 || i >= shapeOf()[0] || j >= shapeOf()[1]) throw std::invalid_argument("NDArray::operator(i,j): one of input indexes is out of array length or rank!=2 !"); Nd4jLong coords[2] = {i, j}; auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), 
coords, rankOf()); return _buffer[xOffset]; } ////////////////////////////////////////////////////////////////////////// // modifying operator for 2D matrix, i - row, j - column template<typename T> T& NDArray<T>::operator()(const Nd4jLong i, const Nd4jLong j) { if (rankOf() != 2 || i >= shapeOf()[0] || j >= shapeOf()[1]) throw std::invalid_argument("NDArray::operator(i,j): one of input indexes is out of array length or rank!=2 !"); Nd4jLong coords[2] = {i, j}; auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf()); return _buffer[xOffset]; } ////////////////////////////////////////////////////////////////////////// // accessing operator for 3D array, i - row, j - column template<typename T> T NDArray<T>::operator()(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) const { if (rankOf() != 3 || i >= shapeOf()[0] || j >= shapeOf()[1] || j >= shapeOf()[2]) throw std::invalid_argument("NDArray::operator(i,j,k): one of input indexes is out of array length or rank!=3 !"); Nd4jLong coords[3] = {i, j, k}; auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf()); return _buffer[xOffset]; } ////////////////////////////////////////////////////////////////////////// // modifying operator for 3D array template<typename T> T& NDArray<T>::operator()(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) { if (rankOf() != 3 || i >= shapeOf()[0] || j >= shapeOf()[1] || k >= shapeOf()[2]) throw std::invalid_argument("NDArray::operator(i,j,k): one of input indexes is out of array length or rank!=3 !"); Nd4jLong coords[3] = {i, j, k}; auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf()); return _buffer[xOffset]; } template<typename T> T NDArray<T>::operator()(const Nd4jLong t, const Nd4jLong u, const Nd4jLong v, const Nd4jLong w) const { if (rankOf() != 4 || t >= shapeOf()[0] || u >= shapeOf()[1] || v >= shapeOf()[2] || w >= shapeOf()[3]) throw std::invalid_argument("NDArray::operator(t,u,v,w): one of input indexes 
is out of array length or rank!=4 !"); Nd4jLong coords[4] = {t, u, v, w}; auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf()); return _buffer[xOffset]; } template<typename T> T& NDArray<T>::operator()(const Nd4jLong t, const Nd4jLong u, const Nd4jLong v, const Nd4jLong w) { if (rankOf() != 4 || t >= shapeOf()[0] || u >= shapeOf()[1] || v >= shapeOf()[2] || w >= shapeOf()[3]) throw std::invalid_argument("NDArray::operator(t,u,v,w): one of input indexes is out of array length or rank!=4 !"); Nd4jLong coords[4] = {t, u, v, w}; auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf()); return _buffer[xOffset]; } ////////////////////////////////////////////////////////////////////////// // Return value from linear buffer template<typename T> T NDArray<T>::getScalar(const Nd4jLong i) const { return (*this)(i); } ////////////////////////////////////////////////////////////////////////// template<typename T> T NDArray<T>::getIndexedScalar(const Nd4jLong i) const { return (*this)(i); } ////////////////////////////////////////////////////////////////////////// // Returns value from 2D matrix by coordinates/indexes template<typename T> T NDArray<T>::getScalar(const Nd4jLong i, const Nd4jLong j) const { return (*this)(i, j); } ////////////////////////////////////////////////////////////////////////// // returns value from 3D tensor by coordinates template<typename T> T NDArray<T>::getScalar(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) const { return (*this)(i, j, k); } ////////////////////////////////////////////////////////////////////////// template<typename T> void NDArray<T>::putIndexedScalar(const Nd4jLong i, const T value) { (*this)(i) = value; } ////////////////////////////////////////////////////////////////////////// // This method sets value in linear buffer to position i template<typename T> void NDArray<T>::putScalar(const Nd4jLong i, const T value) { (*this)(i) = value; } 
//////////////////////////////////////////////////////////////////////////
// This method sets value in 2D matrix to position i, j
template<typename T>
void NDArray<T>::putScalar(const Nd4jLong i, const Nd4jLong j, const T value) {
    (*this)(i,j) = value;
}

//////////////////////////////////////////////////////////////////////////
// This method sets value in 3D matrix to position i,j,k
template<typename T>
void NDArray<T>::putScalar(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k, const T value) {
    (*this)(i,j,k) = value;
}

//////////////////////////////////////////////////////////////////////////
// total memory taken by this array: data buffer plus the shapeInfo descriptor
template<typename T>
Nd4jLong NDArray<T>::memoryFootprint() {
    Nd4jLong size = this->lengthOf() * this->sizeOfT();
    size += shape::shapeInfoByteLength(this->rankOf());
    return size;
}

//////////////////////////////////////////////////////////////////////////
// still the definition of inline function must be in header file
// true if this array's shape equals the given vector; -1 in `shape` acts as a
// wildcard for that dimension, and a scalar matches the special shape {0}
template<typename T>
bool NDArray<T>::isSameShape(const std::vector<Nd4jLong>& shape) const{
    if (this->isScalar() && shape.size() == 1 && shape[0] == 0)
        return true;
    if (this->rankOf() != (int) shape.size())
        return false;
    for (int e = 0; e < this->rankOf(); e++) {
        if (this->shapeOf()[e] != shape.at(e) && shape.at(e) != -1)
            return false;
    }
    return true;
}

//////////////////////////////////////////////////////////////////////////
// shape comparison against another array; empty arrays only match empty arrays.
// Reads other's rank from _shapeInfo[0] and its dimensions starting at
// _shapeInfo+1 (apparently the rank-first descriptor layout) and delegates to
// the vector overload above.
template<typename T>
bool NDArray<T>::isSameShape(const NDArray<T> *other) const {
    if (this->isEmpty() != other->isEmpty())
        return false;
    return isSameShape(std::vector<Nd4jLong>(other->_shapeInfo+1, other->_shapeInfo+1+other->_shapeInfo[0]));
}

//////////////////////////////////////////////////////////////////////////
// convenience overload taking a reference
template<typename T>
bool NDArray<T>::isSameShape(NDArray<T> &other) const {
    return isSameShape(&other);
}

//////////////////////////////////////////////////////////////////////////
// convenience overload taking a brace-list of dimensions
template<typename T>
bool NDArray<T>::isSameShape(const std::initializer_list<Nd4jLong>& other) const {
    return isSameShape(std::vector<Nd4jLong>(other));
}

//////////////////////////////////////////////////////////////////////////
// returns true if these two NDArrays have same _shapeInfo
// still the definition of inline function must be in header file
template<typename T>
bool NDArray<T>::isSameShapeStrict(const NDArray<T> *other) const {
    return shape::equalsStrict(_shapeInfo, other->_shapeInfo);
}

// true when the shapeInfo flags mark this array as EMPTY
template<typename T>
bool NDArray<T>::isEmpty() const {
    return ArrayOptions::arrayType(this->getShapeInfo()) == ArrayType::EMPTY;
}

// equality: same shape AND element-wise equality (delegated to equalsTo)
template <typename T>
bool NDArray<T>::operator ==(const NDArray<T> &other) const {
    if (!this->isSameShape(&other))
        return false;

    return this->equalsTo(&other);
}

} // closes the enclosing namespace (presumably nd4j; opened before this chunk — confirm)

#endif
sam_layer.c
#include "sam_layer.h"
#include "utils.h"
#include "dark_cuda.h"
#include "blas.h"
#include <stdio.h>
#include <assert.h>

// Build a SAM (scale/attention) layer: its output is the element-wise product
// of the incoming activations and the output of layer `index`.
// Both inputs must have identical geometry (asserted below).
layer make_sam_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2)
{
    fprintf(stderr,"scale Layer: %d\n", index);

    layer l = { (LAYER_TYPE)0 };
    l.type = SAM;
    l.batch = batch;

    l.w = w;
    l.h = h;
    l.c = c;
    l.out_w = w2;
    l.out_h = h2;
    l.out_c = c2;

    // element-wise multiplication requires matching shapes on both sides
    assert(l.out_c == l.c);
    assert(l.w == l.out_w && l.h == l.out_h);

    l.outputs = l.out_w*l.out_h*l.out_c;
    l.inputs = l.outputs;
    l.index = index;

    l.delta = (float*)xcalloc(l.outputs * batch, sizeof(float));
    l.output = (float*)xcalloc(l.outputs * batch, sizeof(float));

    l.forward = forward_sam_layer;
    l.backward = backward_sam_layer;
#ifdef GPU
    l.forward_gpu = forward_sam_layer_gpu;
    l.backward_gpu = backward_sam_layer_gpu;

    l.delta_gpu = cuda_make_array(l.delta, l.outputs*batch);
    l.output_gpu = cuda_make_array(l.output, l.outputs*batch);
#endif
    return l;
}

// Re-allocate output/delta buffers after the network geometry changes.
void resize_sam_layer(layer *l, int w, int h)
{
    l->out_w = w;
    l->out_h = h;
    l->outputs = l->out_w*l->out_h*l->out_c;
    l->inputs = l->outputs;

    const int total = l->outputs * l->batch;
    l->delta = (float*)xrealloc(l->delta, total * sizeof(float));
    l->output = (float*)xrealloc(l->output, total * sizeof(float));
#ifdef GPU
    cuda_free(l->output_gpu);
    cuda_free(l->delta_gpu);

    l->output_gpu = cuda_make_array(l->output, total);
    l->delta_gpu = cuda_make_array(l->delta, total);
#endif
}

// Forward pass: out[i] = input[i] * attention[i], then the layer activation.
void forward_sam_layer(const layer l, network_state state)
{
    const int total = l.batch * l.out_c * l.out_w * l.out_h;
    float *attention = state.net.layers[l.index].output;

    int idx;
    #pragma omp parallel for
    for (idx = 0; idx < total; ++idx) {
        l.output[idx] = state.input[idx] * attention[idx];
    }

    activate_array(l.output, l.outputs*l.batch, l.activation);
}

// Backward pass: propagate the activation gradient, then split it between the
// primary input (scaled by the attention map) and the attention layer
// (scaled by the primary input).
void backward_sam_layer(const layer l, network_state state)
{
    gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);

    const int total = l.batch * l.out_c * l.out_w * l.out_h;
    float *attention = state.net.layers[l.index].output;
    float *attention_delta = state.net.layers[l.index].delta;

    int idx;
    #pragma omp parallel for
    for (idx = 0; idx < total; ++idx) {
        // accumulate into the network's delta; overwrite the attention delta
        state.delta[idx] += l.delta[idx] * attention[idx];
        attention_delta[idx] = state.input[idx] * l.delta[idx];
    }
}

#ifdef GPU
// GPU forward: same element-wise product, delegated to the CUDA kernels.
void forward_sam_layer_gpu(const layer l, network_state state)
{
    const int total = l.batch * l.out_c * l.out_w * l.out_h;
    const int channel_size = 1;

    sam_gpu(state.net.layers[l.index].output_gpu, total, channel_size, state.input, l.output_gpu);
    activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
}

// GPU backward: mirror of backward_sam_layer on device buffers.
void backward_sam_layer_gpu(const layer l, network_state state)
{
    gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);

    const int total = l.batch * l.out_c * l.out_w * l.out_h;
    const int channel_size = 1;
    float *attention = state.net.layers[l.index].output_gpu;
    float *attention_delta = state.net.layers[l.index].delta_gpu;

    backward_sam_gpu(l.delta_gpu, total, channel_size, state.input, attention_delta, attention, state.delta);
}
#endif
libdecimate.c
/* * This file is part of the libdecimate library * (https://github.com/zindy/libdecimate) * Copyright (c) 2017 Egor Zindy * * libdecimate is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation, version 3. * * libdecimate is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <stdlib.h> #include <stdint.h> #include <errno.h> #include <math.h> #include <omp.h> #include "libdecimate.h" //takes three arrays (RGB) and a max value, returns a Nx3 list of unique RGB values void decimate( uint16_t *ArrayR, int ZdimR, int YdimR, int XdimR, uint16_t *ArrayG, int ZdimG, int YdimG, int XdimG, uint16_t *ArrayB, int ZdimB, int YdimB, int XdimB, int input_bit, int output_bit, uint16_t **ArrayOut, int *YdimOut, int *XdimOut) { uint64_t i,j,n=0; //output array uint16_t* decomp = NULL; int decimation = input_bit - output_bit; decimation = (decimation > 0)?decimation:0; uint64_t max_out = pow(2,output_bit)-1; uint64_t signature, temp_r, temp_g, temp_b, max_signature=0; //This is the truth array that holds all three RGB values uint8_t* truth = NULL; if ( ZdimR != ZdimG || ZdimR != ZdimB || YdimR != YdimG || YdimR != YdimB || XdimR != XdimG || XdimR != XdimB) { errno = E2BIG; goto end; } //The truth table needs to encompass all the values from 0 to 2**outputbits in all three dimensions. truth = (uint8_t *)calloc(pow(2,(output_bit*3)), sizeof(uint8_t)); if (truth == NULL) { errno = ENOMEM; goto end; } //The strategy here is to fill truth array if we have a hit. Simultaneously, we could have multiple threads hitting the same //truth index. 
So don't count the number of hits as part of this loop, we can do that afterwards #pragma omp parallel for \ default(shared) private(temp_r, temp_g, temp_b, signature) for (i=0; i<ZdimR*YdimR*XdimR; i++) { temp_r = (uint64_t)ArrayR[i] >> decimation; if (temp_r > max_out) temp_r = max_out; temp_g = (uint64_t)ArrayG[i] >> decimation; if (temp_g > max_out) temp_g = max_out; temp_b = (uint64_t)ArrayB[i] >> decimation; if (temp_b > max_out) temp_b = max_out; //construct the rgb signature signature = temp_r | (temp_g << output_bit) | (temp_b << (output_bit*2)); if (truth[signature] == 1) continue; if (signature > max_signature) { max_signature = signature; } //printf("signature=%d max_sig=%d\n",signature,max_signature); truth[signature] = 1; } //we need to count the total number of hits, this is to determine the size of the output array (nx3). n = 0; #pragma omp parallel for \ default(shared) reduction( + : n ) for (i=0; i<=max_signature; i++) { if (truth[i] > 0) n++; } //output is 16 bit (up to) //could definitely combine these both into a single realloc. This just reminds me what is going on. 
if (*ArrayOut == NULL) { decomp = (uint16_t *)malloc(3*n*sizeof(uint16_t)); } else { decomp = (uint16_t *)realloc(*ArrayOut, 3*n*sizeof(uint16_t)); } if (decomp == NULL) { errno = ENOMEM; goto end; } //use this as a mask of width output_bit to unpack the index back into 3 r,g,b values signature = pow(2,output_bit) - 1; j = 0; for (i=0; i<=max_signature; i++) { if (truth[i] > 0) { //printf("i=%d j=%d truth[i]=%d\n",i,j,truth[i]); temp_r = i & signature; temp_g = (i >> output_bit) & signature; temp_b = (i >> (output_bit*2)) & signature; decomp[j++] = (uint16_t)temp_r; decomp[j++] = (uint16_t)temp_g; decomp[j++] = (uint16_t)temp_b; truth[i] = 0; } } end: if (truth != NULL) free(truth); *ArrayOut = decomp; *YdimOut = n; *XdimOut = 3; } //This re void decimate_indexes( uint16_t *ArrayR, int ZdimR, int YdimR, int XdimR, uint16_t *ArrayG, int ZdimG, int YdimG, int XdimG, uint16_t *ArrayB, int ZdimB, int YdimB, int XdimB, int input_bit, int output_bit, uint64_t **ArrayOut, int *NdimOut) { uint64_t i,j,n=0; //output array uint64_t* decomp = NULL; int decimation = input_bit - output_bit; decimation = (decimation > 0)?decimation:0; uint64_t max_out = pow(2,output_bit)-1; uint64_t signature, temp_r, temp_g, temp_b, max_signature=0; //This is the truth array that holds all three RGB values uint8_t* truth = NULL; uint64_t* truth_index = NULL; if ( ZdimR != ZdimG || ZdimR != ZdimB || YdimR != YdimG || YdimR != YdimB || XdimR != XdimG || XdimR != XdimB) { errno = E2BIG; goto end; } //The truth table needs to encompass all the values from 0 to 2**outputbits in all three dimensions. truth = (uint8_t *)calloc(pow(2,(output_bit*3)), sizeof(uint8_t)); truth_index = (uint64_t *)malloc(pow(2,(output_bit*3))*sizeof(uint64_t)); if (truth == NULL || truth_index == NULL) { errno = ENOMEM; goto end; } //The strategy here is to fill truth array if we have a hit. Simultaneously, we could have multiple threads hitting the same //truth index. 
So don't count the number of hits as part of this loop, we can do that afterwards #pragma omp parallel for \ default(shared) private(temp_r, temp_g, temp_b, signature) for (i=0; i<ZdimR*YdimR*XdimR; i++) { temp_r = (uint64_t)ArrayR[i] >> decimation; if (temp_r > max_out) temp_r = max_out; temp_g = (uint64_t)ArrayG[i] >> decimation; if (temp_g > max_out) temp_g = max_out; temp_b = (uint64_t)ArrayB[i] >> decimation; if (temp_b > max_out) temp_b = max_out; //construct the rgb signature signature = temp_r | (temp_g << output_bit) | (temp_b << (output_bit*2)); if (truth[signature] == 1) continue; if (signature > max_signature) { max_signature = signature; } truth[signature] = 1; truth_index[signature] = i; //printf("signature=%d max_sig=%d i=%d truth_index[signature]=%d\n",signature,max_signature,i, truth_index[signature]); } //we need to count the total number of hits, this is to determine the size of the output array (nx3). n = 0; #pragma omp parallel for \ default(shared) reduction( + : n ) for (i=0; i<=max_signature; i++) { if (truth[i] > 0) n++; } //output is 16 bit (up to) //could definitely combine these both into a single realloc. This just reminds me what is going on. if (*ArrayOut == NULL) { decomp = (uint64_t *)malloc(n*sizeof(uint64_t)); } else { decomp = (uint64_t *)realloc(*ArrayOut, n*sizeof(uint64_t)); } if (decomp == NULL) { errno = ENOMEM; goto end; } j = 0; for (i=0; i<=max_signature; i++) { if (truth[i] > 0) { //printf("i=%d j=%d truth[i]=%d truth_index[i]=%d\n",i,j,truth[i],truth_index[i]); decomp[j] = truth_index[i]; truth[i] = 0; j++; } } end: if (truth != NULL) free(truth); if (truth_index != NULL) free(truth_index); *ArrayOut = decomp; *NdimOut = n; }
convolution_3x3_pack8.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Direct 3x3 stride-1 convolution for pack8 layout (8 floats per packed channel,
// as shown by the 8-wide vectors and the +8 strides below). Output is primed with
// the bias and accumulated channel-by-channel over the input. The inner loop
// computes two output pixels at a time (outptr / outptr + 8); per input channel the
// weights are 9 taps x 8x8 = 9 groups of 64 floats, advanced via kptr += 64 and
// rewound with kptr -= 64 * 8 once the last (9th) group has been consumed.
// NOTE(review): r0/r1/r2 advance by 16 at the end of each row, which presumably
// skips a 2-pixel border of the padded input -- confirm against the caller.
static void conv3x3s1_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);

        // prime the whole output channel with its bias (zero when no bias given)
        __m256 _bias0 = bias ? _mm256_loadu_ps(bias + p * 8) : _mm256_set1_ps(0.f);
        out.fill(_bias0);

        for (int q = 0; q < inch; q++)
        {
            float* outptr = out;

            const Mat img0 = bottom_blob.channel(q);

            // three consecutive input rows feeding one output row
            const float* r0 = img0.row(0);
            const float* r1 = img0.row(1);
            const float* r2 = img0.row(2);

            const float* kptr = kernel.channel(p).row(q);

            int i = 0;
            for (; i < outh; i++)
            {
                int j = 0;
                // main loop: two output pixels (16 floats) per iteration
                for (; j + 1 < outw; j += 2)
                {
                    __m256 _sum00 = _mm256_loadu_ps(outptr);     // pixel 0, even taps
                    __m256 _sum01 = _mm256_setzero_ps();         // pixel 0, odd taps
                    __m256 _sum10 = _mm256_loadu_ps(outptr + 8); // pixel 1, even taps
                    __m256 _sum11 = _mm256_setzero_ps();         // pixel 1, odd taps

                    // tap (0,0): input pixel r0[0..7] with weight group k0
                    __m256 _r000 = _mm256_broadcast_ss(r0 + 0);
                    __m256 _r001 = _mm256_broadcast_ss(r0 + 1);
                    __m256 _r002 = _mm256_broadcast_ss(r0 + 2);
                    __m256 _r003 = _mm256_broadcast_ss(r0 + 3);
                    __m256 _r004 = _mm256_broadcast_ss(r0 + 4);
                    __m256 _r005 = _mm256_broadcast_ss(r0 + 5);
                    __m256 _r006 = _mm256_broadcast_ss(r0 + 6);
                    __m256 _r007 = _mm256_broadcast_ss(r0 + 7);

                    __m256 _k00 = _mm256_loadu_ps(kptr);
                    __m256 _k01 = _mm256_loadu_ps(kptr + 8);
                    __m256 _k02 = _mm256_loadu_ps(kptr + 16);
                    __m256 _k03 = _mm256_loadu_ps(kptr + 24);
                    __m256 _k04 = _mm256_loadu_ps(kptr + 32);
                    __m256 _k05 = _mm256_loadu_ps(kptr + 40);
                    __m256 _k06 = _mm256_loadu_ps(kptr + 48);
                    __m256 _k07 = _mm256_loadu_ps(kptr + 56);

                    kptr += 64;

                    _sum00 = _mm256_comp_fmadd_ps(_r000, _k00, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r001, _k01, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r002, _k02, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r003, _k03, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r004, _k04, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r005, _k05, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r006, _k06, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r007, _k07, _sum01);

                    // same k0 group applied to the second output pixel (r0[8..15])
                    __m256 _r010 = _mm256_broadcast_ss(r0 + 8);
                    __m256 _r011 = _mm256_broadcast_ss(r0 + 9);
                    __m256 _r012 = _mm256_broadcast_ss(r0 + 10);
                    __m256 _r013 = _mm256_broadcast_ss(r0 + 11);
                    __m256 _r014 = _mm256_broadcast_ss(r0 + 12);
                    __m256 _r015 = _mm256_broadcast_ss(r0 + 13);
                    __m256 _r016 = _mm256_broadcast_ss(r0 + 14);
                    __m256 _r017 = _mm256_broadcast_ss(r0 + 15);

                    _sum10 = _mm256_comp_fmadd_ps(_r010, _k00, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r011, _k01, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r012, _k02, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r013, _k03, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r014, _k04, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r015, _k05, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r016, _k06, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r017, _k07, _sum11);

                    // tap (0,1): k1 group; r0[8..15] serves pixel 0, r0[16..23] pixel 1
                    __m256 _k10 = _mm256_loadu_ps(kptr);
                    __m256 _k11 = _mm256_loadu_ps(kptr + 8);
                    __m256 _k12 = _mm256_loadu_ps(kptr + 16);
                    __m256 _k13 = _mm256_loadu_ps(kptr + 24);
                    __m256 _k14 = _mm256_loadu_ps(kptr + 32);
                    __m256 _k15 = _mm256_loadu_ps(kptr + 40);
                    __m256 _k16 = _mm256_loadu_ps(kptr + 48);
                    __m256 _k17 = _mm256_loadu_ps(kptr + 56);

                    kptr += 64;

                    _sum00 = _mm256_comp_fmadd_ps(_r010, _k10, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r011, _k11, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r012, _k12, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r013, _k13, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r014, _k14, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r015, _k15, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r016, _k16, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r017, _k17, _sum01);

                    __m256 _r020 = _mm256_broadcast_ss(r0 + 16);
                    __m256 _r021 = _mm256_broadcast_ss(r0 + 17);
                    __m256 _r022 = _mm256_broadcast_ss(r0 + 18);
                    __m256 _r023 = _mm256_broadcast_ss(r0 + 19);
                    __m256 _r024 = _mm256_broadcast_ss(r0 + 20);
                    __m256 _r025 = _mm256_broadcast_ss(r0 + 21);
                    __m256 _r026 = _mm256_broadcast_ss(r0 + 22);
                    __m256 _r027 = _mm256_broadcast_ss(r0 + 23);

                    _sum10 = _mm256_comp_fmadd_ps(_r020, _k10, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r021, _k11, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r022, _k12, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r023, _k13, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r024, _k14, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r025, _k15, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r026, _k16, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r027, _k17, _sum11);

                    // tap (0,2): k2 group
                    __m256 _k20 = _mm256_loadu_ps(kptr);
                    __m256 _k21 = _mm256_loadu_ps(kptr + 8);
                    __m256 _k22 = _mm256_loadu_ps(kptr + 16);
                    __m256 _k23 = _mm256_loadu_ps(kptr + 24);
                    __m256 _k24 = _mm256_loadu_ps(kptr + 32);
                    __m256 _k25 = _mm256_loadu_ps(kptr + 40);
                    __m256 _k26 = _mm256_loadu_ps(kptr + 48);
                    __m256 _k27 = _mm256_loadu_ps(kptr + 56);

                    kptr += 64;

                    _sum00 = _mm256_comp_fmadd_ps(_r020, _k20, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r021, _k21, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r022, _k22, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r023, _k23, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r024, _k24, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r025, _k25, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r026, _k26, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r027, _k27, _sum01);

                    __m256 _r030 = _mm256_broadcast_ss(r0 + 24);
                    __m256 _r031 = _mm256_broadcast_ss(r0 + 25);
                    __m256 _r032 = _mm256_broadcast_ss(r0 + 26);
                    __m256 _r033 = _mm256_broadcast_ss(r0 + 27);
                    __m256 _r034 = _mm256_broadcast_ss(r0 + 28);
                    __m256 _r035 = _mm256_broadcast_ss(r0 + 29);
                    __m256 _r036 = _mm256_broadcast_ss(r0 + 30);
                    __m256 _r037 = _mm256_broadcast_ss(r0 + 31);

                    _sum10 = _mm256_comp_fmadd_ps(_r030, _k20, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r031, _k21, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r032, _k22, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r033, _k23, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r034, _k24, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r035, _k25, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r036, _k26, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r037, _k27, _sum11);

                    // middle input row: taps (1,0)..(1,2) with k3/k4/k5 groups
                    __m256 _r100 = _mm256_broadcast_ss(r1 + 0);
                    __m256 _r101 = _mm256_broadcast_ss(r1 + 1);
                    __m256 _r102 = _mm256_broadcast_ss(r1 + 2);
                    __m256 _r103 = _mm256_broadcast_ss(r1 + 3);
                    __m256 _r104 = _mm256_broadcast_ss(r1 + 4);
                    __m256 _r105 = _mm256_broadcast_ss(r1 + 5);
                    __m256 _r106 = _mm256_broadcast_ss(r1 + 6);
                    __m256 _r107 = _mm256_broadcast_ss(r1 + 7);

                    __m256 _k30 = _mm256_loadu_ps(kptr);
                    __m256 _k31 = _mm256_loadu_ps(kptr + 8);
                    __m256 _k32 = _mm256_loadu_ps(kptr + 16);
                    __m256 _k33 = _mm256_loadu_ps(kptr + 24);
                    __m256 _k34 = _mm256_loadu_ps(kptr + 32);
                    __m256 _k35 = _mm256_loadu_ps(kptr + 40);
                    __m256 _k36 = _mm256_loadu_ps(kptr + 48);
                    __m256 _k37 = _mm256_loadu_ps(kptr + 56);

                    kptr += 64;

                    _sum00 = _mm256_comp_fmadd_ps(_r100, _k30, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r101, _k31, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r102, _k32, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r103, _k33, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r104, _k34, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r105, _k35, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r106, _k36, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r107, _k37, _sum01);

                    __m256 _r110 = _mm256_broadcast_ss(r1 + 8);
                    __m256 _r111 = _mm256_broadcast_ss(r1 + 9);
                    __m256 _r112 = _mm256_broadcast_ss(r1 + 10);
                    __m256 _r113 = _mm256_broadcast_ss(r1 + 11);
                    __m256 _r114 = _mm256_broadcast_ss(r1 + 12);
                    __m256 _r115 = _mm256_broadcast_ss(r1 + 13);
                    __m256 _r116 = _mm256_broadcast_ss(r1 + 14);
                    __m256 _r117 = _mm256_broadcast_ss(r1 + 15);

                    _sum10 = _mm256_comp_fmadd_ps(_r110, _k30, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r111, _k31, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r112, _k32, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r113, _k33, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r114, _k34, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r115, _k35, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r116, _k36, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r117, _k37, _sum11);

                    __m256 _k40 = _mm256_loadu_ps(kptr);
                    __m256 _k41 = _mm256_loadu_ps(kptr + 8);
                    __m256 _k42 = _mm256_loadu_ps(kptr + 16);
                    __m256 _k43 = _mm256_loadu_ps(kptr + 24);
                    __m256 _k44 = _mm256_loadu_ps(kptr + 32);
                    __m256 _k45 = _mm256_loadu_ps(kptr + 40);
                    __m256 _k46 = _mm256_loadu_ps(kptr + 48);
                    __m256 _k47 = _mm256_loadu_ps(kptr + 56);

                    kptr += 64;

                    _sum00 = _mm256_comp_fmadd_ps(_r110, _k40, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r111, _k41, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r112, _k42, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r113, _k43, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r114, _k44, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r115, _k45, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r116, _k46, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r117, _k47, _sum01);

                    __m256 _r120 = _mm256_broadcast_ss(r1 + 16);
                    __m256 _r121 = _mm256_broadcast_ss(r1 + 17);
                    __m256 _r122 = _mm256_broadcast_ss(r1 + 18);
                    __m256 _r123 = _mm256_broadcast_ss(r1 + 19);
                    __m256 _r124 = _mm256_broadcast_ss(r1 + 20);
                    __m256 _r125 = _mm256_broadcast_ss(r1 + 21);
                    __m256 _r126 = _mm256_broadcast_ss(r1 + 22);
                    __m256 _r127 = _mm256_broadcast_ss(r1 + 23);

                    _sum10 = _mm256_comp_fmadd_ps(_r120, _k40, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r121, _k41, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r122, _k42, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r123, _k43, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r124, _k44, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r125, _k45, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r126, _k46, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r127, _k47, _sum11);

                    __m256 _k50 = _mm256_loadu_ps(kptr);
                    __m256 _k51 = _mm256_loadu_ps(kptr + 8);
                    __m256 _k52 = _mm256_loadu_ps(kptr + 16);
                    __m256 _k53 = _mm256_loadu_ps(kptr + 24);
                    __m256 _k54 = _mm256_loadu_ps(kptr + 32);
                    __m256 _k55 = _mm256_loadu_ps(kptr + 40);
                    __m256 _k56 = _mm256_loadu_ps(kptr + 48);
                    __m256 _k57 = _mm256_loadu_ps(kptr + 56);

                    kptr += 64;

                    _sum00 = _mm256_comp_fmadd_ps(_r120, _k50, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r121, _k51, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r122, _k52, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r123, _k53, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r124, _k54, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r125, _k55, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r126, _k56, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r127, _k57, _sum01);

                    __m256 _r130 = _mm256_broadcast_ss(r1 + 24);
                    __m256 _r131 = _mm256_broadcast_ss(r1 + 25);
                    __m256 _r132 = _mm256_broadcast_ss(r1 + 26);
                    __m256 _r133 = _mm256_broadcast_ss(r1 + 27);
                    __m256 _r134 = _mm256_broadcast_ss(r1 + 28);
                    __m256 _r135 = _mm256_broadcast_ss(r1 + 29);
                    __m256 _r136 = _mm256_broadcast_ss(r1 + 30);
                    __m256 _r137 = _mm256_broadcast_ss(r1 + 31);

                    _sum10 = _mm256_comp_fmadd_ps(_r130, _k50, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r131, _k51, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r132, _k52, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r133, _k53, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r134, _k54, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r135, _k55, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r136, _k56, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r137, _k57, _sum11);

                    // bottom input row: taps (2,0)..(2,2) with k6/k7/k8 groups
                    __m256 _r200 = _mm256_broadcast_ss(r2 + 0);
                    __m256 _r201 = _mm256_broadcast_ss(r2 + 1);
                    __m256 _r202 = _mm256_broadcast_ss(r2 + 2);
                    __m256 _r203 = _mm256_broadcast_ss(r2 + 3);
                    __m256 _r204 = _mm256_broadcast_ss(r2 + 4);
                    __m256 _r205 = _mm256_broadcast_ss(r2 + 5);
                    __m256 _r206 = _mm256_broadcast_ss(r2 + 6);
                    __m256 _r207 = _mm256_broadcast_ss(r2 + 7);

                    __m256 _k60 = _mm256_loadu_ps(kptr);
                    __m256 _k61 = _mm256_loadu_ps(kptr + 8);
                    __m256 _k62 = _mm256_loadu_ps(kptr + 16);
                    __m256 _k63 = _mm256_loadu_ps(kptr + 24);
                    __m256 _k64 = _mm256_loadu_ps(kptr + 32);
                    __m256 _k65 = _mm256_loadu_ps(kptr + 40);
                    __m256 _k66 = _mm256_loadu_ps(kptr + 48);
                    __m256 _k67 = _mm256_loadu_ps(kptr + 56);

                    kptr += 64;

                    _sum00 = _mm256_comp_fmadd_ps(_r200, _k60, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r201, _k61, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r202, _k62, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r203, _k63, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r204, _k64, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r205, _k65, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r206, _k66, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r207, _k67, _sum01);

                    __m256 _r210 = _mm256_broadcast_ss(r2 + 8);
                    __m256 _r211 = _mm256_broadcast_ss(r2 + 9);
                    __m256 _r212 = _mm256_broadcast_ss(r2 + 10);
                    __m256 _r213 = _mm256_broadcast_ss(r2 + 11);
                    __m256 _r214 = _mm256_broadcast_ss(r2 + 12);
                    __m256 _r215 = _mm256_broadcast_ss(r2 + 13);
                    __m256 _r216 = _mm256_broadcast_ss(r2 + 14);
                    __m256 _r217 = _mm256_broadcast_ss(r2 + 15);

                    _sum10 = _mm256_comp_fmadd_ps(_r210, _k60, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r211, _k61, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r212, _k62, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r213, _k63, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r214, _k64, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r215, _k65, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r216, _k66, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r217, _k67, _sum11);

                    __m256 _k70 = _mm256_loadu_ps(kptr);
                    __m256 _k71 = _mm256_loadu_ps(kptr + 8);
                    __m256 _k72 = _mm256_loadu_ps(kptr + 16);
                    __m256 _k73 = _mm256_loadu_ps(kptr + 24);
                    __m256 _k74 = _mm256_loadu_ps(kptr + 32);
                    __m256 _k75 = _mm256_loadu_ps(kptr + 40);
                    __m256 _k76 = _mm256_loadu_ps(kptr + 48);
                    __m256 _k77 = _mm256_loadu_ps(kptr + 56);

                    kptr += 64;

                    _sum00 = _mm256_comp_fmadd_ps(_r210, _k70, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r211, _k71, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r212, _k72, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r213, _k73, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r214, _k74, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r215, _k75, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r216, _k76, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r217, _k77, _sum01);

                    __m256 _r220 = _mm256_broadcast_ss(r2 + 16);
                    __m256 _r221 = _mm256_broadcast_ss(r2 + 17);
                    __m256 _r222 = _mm256_broadcast_ss(r2 + 18);
                    __m256 _r223 = _mm256_broadcast_ss(r2 + 19);
                    __m256 _r224 = _mm256_broadcast_ss(r2 + 20);
                    __m256 _r225 = _mm256_broadcast_ss(r2 + 21);
                    __m256 _r226 = _mm256_broadcast_ss(r2 + 22);
                    __m256 _r227 = _mm256_broadcast_ss(r2 + 23);

                    _sum10 = _mm256_comp_fmadd_ps(_r220, _k70, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r221, _k71, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r222, _k72, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r223, _k73, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r224, _k74, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r225, _k75, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r226, _k76, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r227, _k77, _sum11);

                    // last tap group (k8): no kptr advance, rewind happens below
                    __m256 _k80 = _mm256_loadu_ps(kptr);
                    __m256 _k81 = _mm256_loadu_ps(kptr + 8);
                    __m256 _k82 = _mm256_loadu_ps(kptr + 16);
                    __m256 _k83 = _mm256_loadu_ps(kptr + 24);
                    __m256 _k84 = _mm256_loadu_ps(kptr + 32);
                    __m256 _k85 = _mm256_loadu_ps(kptr + 40);
                    __m256 _k86 = _mm256_loadu_ps(kptr + 48);
                    __m256 _k87 = _mm256_loadu_ps(kptr + 56);

                    _sum00 = _mm256_comp_fmadd_ps(_r220, _k80, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r221, _k81, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r222, _k82, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r223, _k83, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r224, _k84, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r225, _k85, _sum01);
                    _sum00 = _mm256_comp_fmadd_ps(_r226, _k86, _sum00);
                    _sum01 = _mm256_comp_fmadd_ps(_r227, _k87, _sum01);

                    __m256 _r230 = _mm256_broadcast_ss(r2 + 24);
                    __m256 _r231 = _mm256_broadcast_ss(r2 + 25);
                    __m256 _r232 = _mm256_broadcast_ss(r2 + 26);
                    __m256 _r233 = _mm256_broadcast_ss(r2 + 27);
                    __m256 _r234 = _mm256_broadcast_ss(r2 + 28);
                    __m256 _r235 = _mm256_broadcast_ss(r2 + 29);
                    __m256 _r236 = _mm256_broadcast_ss(r2 + 30);
                    __m256 _r237 = _mm256_broadcast_ss(r2 + 31);

                    _sum10 = _mm256_comp_fmadd_ps(_r230, _k80, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r231, _k81, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r232, _k82, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r233, _k83, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r234, _k84, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r235, _k85, _sum11);
                    _sum10 = _mm256_comp_fmadd_ps(_r236, _k86, _sum10);
                    _sum11 = _mm256_comp_fmadd_ps(_r237, _k87, _sum11);

                    // rewind to the start of this channel's 9x64 weight block
                    kptr -= 64 * 8;

                    // fold the odd-tap accumulators into the even ones and store both pixels
                    _sum00 = _mm256_add_ps(_sum00, _sum01);
                    _sum10 = _mm256_add_ps(_sum10, _sum11);

                    _mm256_storeu_ps(outptr, _sum00);
                    _mm256_storeu_ps(outptr + 8, _sum10);

                    r0 += 16;
                    r1 += 16;
                    r2 += 16;
                    outptr += 16;
                }
                // remainder: one output pixel at a time
                for (; j < outw; j++)
                {
                    __m256 _sum0 = _mm256_loadu_ps(outptr);
                    __m256 _sum1 = _mm256_setzero_ps();

                    __m256 _r000 = _mm256_broadcast_ss(r0 + 0);
                    __m256 _r001 = _mm256_broadcast_ss(r0 + 1);
                    __m256 _r002 = _mm256_broadcast_ss(r0 + 2);
                    __m256 _r003 = _mm256_broadcast_ss(r0 + 3);
                    __m256 _r004 = _mm256_broadcast_ss(r0 + 4);
                    __m256 _r005 = _mm256_broadcast_ss(r0 + 5);
                    __m256 _r006 = _mm256_broadcast_ss(r0 + 6);
                    __m256 _r007 = _mm256_broadcast_ss(r0 + 7);

                    __m256 _k00 = _mm256_loadu_ps(kptr);
                    __m256 _k01 = _mm256_loadu_ps(kptr + 8);
                    __m256 _k02 = _mm256_loadu_ps(kptr + 16);
                    __m256 _k03 = _mm256_loadu_ps(kptr + 24);
                    __m256 _k04 = _mm256_loadu_ps(kptr + 32);
                    __m256 _k05 = _mm256_loadu_ps(kptr + 40);
                    __m256 _k06 = _mm256_loadu_ps(kptr + 48);
                    __m256 _k07 = _mm256_loadu_ps(kptr + 56);

                    kptr += 64;

                    _sum0 = _mm256_comp_fmadd_ps(_r000, _k00, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r001, _k01, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r002, _k02, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r003, _k03, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r004, _k04, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r005, _k05, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r006, _k06, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r007, _k07, _sum1);

                    __m256 _r010 = _mm256_broadcast_ss(r0 + 8);
                    __m256 _r011 = _mm256_broadcast_ss(r0 + 9);
                    __m256 _r012 = _mm256_broadcast_ss(r0 + 10);
                    __m256 _r013 = _mm256_broadcast_ss(r0 + 11);
                    __m256 _r014 = _mm256_broadcast_ss(r0 + 12);
                    __m256 _r015 = _mm256_broadcast_ss(r0 + 13);
                    __m256 _r016 = _mm256_broadcast_ss(r0 + 14);
                    __m256 _r017 = _mm256_broadcast_ss(r0 + 15);

                    __m256 _k10 = _mm256_loadu_ps(kptr);
                    __m256 _k11 = _mm256_loadu_ps(kptr + 8);
                    __m256 _k12 = _mm256_loadu_ps(kptr + 16);
                    __m256 _k13 = _mm256_loadu_ps(kptr + 24);
                    __m256 _k14 = _mm256_loadu_ps(kptr + 32);
                    __m256 _k15 = _mm256_loadu_ps(kptr + 40);
                    __m256 _k16 = _mm256_loadu_ps(kptr + 48);
                    __m256 _k17 = _mm256_loadu_ps(kptr + 56);

                    kptr += 64;

                    _sum0 = _mm256_comp_fmadd_ps(_r010, _k10, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r011, _k11, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r012, _k12, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r013, _k13, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r014, _k14, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r015, _k15, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r016, _k16, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r017, _k17, _sum1);

                    __m256 _r020 = _mm256_broadcast_ss(r0 + 16);
                    __m256 _r021 = _mm256_broadcast_ss(r0 + 17);
                    __m256 _r022 = _mm256_broadcast_ss(r0 + 18);
                    __m256 _r023 = _mm256_broadcast_ss(r0 + 19);
                    __m256 _r024 = _mm256_broadcast_ss(r0 + 20);
                    __m256 _r025 = _mm256_broadcast_ss(r0 + 21);
                    __m256 _r026 = _mm256_broadcast_ss(r0 + 22);
                    __m256 _r027 = _mm256_broadcast_ss(r0 + 23);

                    __m256 _k20 = _mm256_loadu_ps(kptr);
                    __m256 _k21 = _mm256_loadu_ps(kptr + 8);
                    __m256 _k22 = _mm256_loadu_ps(kptr + 16);
                    __m256 _k23 = _mm256_loadu_ps(kptr + 24);
                    __m256 _k24 = _mm256_loadu_ps(kptr + 32);
                    __m256 _k25 = _mm256_loadu_ps(kptr + 40);
                    __m256 _k26 = _mm256_loadu_ps(kptr + 48);
                    __m256 _k27 = _mm256_loadu_ps(kptr + 56);

                    kptr += 64;

                    _sum0 = _mm256_comp_fmadd_ps(_r020, _k20, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r021, _k21, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r022, _k22, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r023, _k23, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r024, _k24, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r025, _k25, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r026, _k26, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r027, _k27, _sum1);

                    __m256 _r100 = _mm256_broadcast_ss(r1 + 0);
                    __m256 _r101 = _mm256_broadcast_ss(r1 + 1);
                    __m256 _r102 = _mm256_broadcast_ss(r1 + 2);
                    __m256 _r103 = _mm256_broadcast_ss(r1 + 3);
                    __m256 _r104 = _mm256_broadcast_ss(r1 + 4);
                    __m256 _r105 = _mm256_broadcast_ss(r1 + 5);
                    __m256 _r106 = _mm256_broadcast_ss(r1 + 6);
                    __m256 _r107 = _mm256_broadcast_ss(r1 + 7);

                    __m256 _k30 = _mm256_loadu_ps(kptr);
                    __m256 _k31 = _mm256_loadu_ps(kptr + 8);
                    __m256 _k32 = _mm256_loadu_ps(kptr + 16);
                    __m256 _k33 = _mm256_loadu_ps(kptr + 24);
                    __m256 _k34 = _mm256_loadu_ps(kptr + 32);
                    __m256 _k35 = _mm256_loadu_ps(kptr + 40);
                    __m256 _k36 = _mm256_loadu_ps(kptr + 48);
                    __m256 _k37 = _mm256_loadu_ps(kptr + 56);

                    kptr += 64;

                    _sum0 = _mm256_comp_fmadd_ps(_r100, _k30, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r101, _k31, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r102, _k32, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r103, _k33, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r104, _k34, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r105, _k35, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r106, _k36, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r107, _k37, _sum1);

                    __m256 _r110 = _mm256_broadcast_ss(r1 + 8);
                    __m256 _r111 = _mm256_broadcast_ss(r1 + 9);
                    __m256 _r112 = _mm256_broadcast_ss(r1 + 10);
                    __m256 _r113 = _mm256_broadcast_ss(r1 + 11);
                    __m256 _r114 = _mm256_broadcast_ss(r1 + 12);
                    __m256 _r115 = _mm256_broadcast_ss(r1 + 13);
                    __m256 _r116 = _mm256_broadcast_ss(r1 + 14);
                    __m256 _r117 = _mm256_broadcast_ss(r1 + 15);

                    __m256 _k40 = _mm256_loadu_ps(kptr);
                    __m256 _k41 = _mm256_loadu_ps(kptr + 8);
                    __m256 _k42 = _mm256_loadu_ps(kptr + 16);
                    __m256 _k43 = _mm256_loadu_ps(kptr + 24);
                    __m256 _k44 = _mm256_loadu_ps(kptr + 32);
                    __m256 _k45 = _mm256_loadu_ps(kptr + 40);
                    __m256 _k46 = _mm256_loadu_ps(kptr + 48);
                    __m256 _k47 = _mm256_loadu_ps(kptr + 56);

                    kptr += 64;

                    _sum0 = _mm256_comp_fmadd_ps(_r110, _k40, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r111, _k41, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r112, _k42, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r113, _k43, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r114, _k44, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r115, _k45, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r116, _k46, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r117, _k47, _sum1);

                    __m256 _r120 = _mm256_broadcast_ss(r1 + 16);
                    __m256 _r121 = _mm256_broadcast_ss(r1 + 17);
                    __m256 _r122 = _mm256_broadcast_ss(r1 + 18);
                    __m256 _r123 = _mm256_broadcast_ss(r1 + 19);
                    __m256 _r124 = _mm256_broadcast_ss(r1 + 20);
                    __m256 _r125 = _mm256_broadcast_ss(r1 + 21);
                    __m256 _r126 = _mm256_broadcast_ss(r1 + 22);
                    __m256 _r127 = _mm256_broadcast_ss(r1 + 23);

                    __m256 _k50 = _mm256_loadu_ps(kptr);
                    __m256 _k51 = _mm256_loadu_ps(kptr + 8);
                    __m256 _k52 = _mm256_loadu_ps(kptr + 16);
                    __m256 _k53 = _mm256_loadu_ps(kptr + 24);
                    __m256 _k54 = _mm256_loadu_ps(kptr + 32);
                    __m256 _k55 = _mm256_loadu_ps(kptr + 40);
                    __m256 _k56 = _mm256_loadu_ps(kptr + 48);
                    __m256 _k57 = _mm256_loadu_ps(kptr + 56);

                    kptr += 64;

                    _sum0 = _mm256_comp_fmadd_ps(_r120, _k50, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r121, _k51, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r122, _k52, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r123, _k53, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r124, _k54, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r125, _k55, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r126, _k56, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r127, _k57, _sum1);

                    __m256 _r200 = _mm256_broadcast_ss(r2 + 0);
                    __m256 _r201 = _mm256_broadcast_ss(r2 + 1);
                    __m256 _r202 = _mm256_broadcast_ss(r2 + 2);
                    __m256 _r203 = _mm256_broadcast_ss(r2 + 3);
                    __m256 _r204 = _mm256_broadcast_ss(r2 + 4);
                    __m256 _r205 = _mm256_broadcast_ss(r2 + 5);
                    __m256 _r206 = _mm256_broadcast_ss(r2 + 6);
                    __m256 _r207 = _mm256_broadcast_ss(r2 + 7);

                    __m256 _k60 = _mm256_loadu_ps(kptr);
                    __m256 _k61 = _mm256_loadu_ps(kptr + 8);
                    __m256 _k62 = _mm256_loadu_ps(kptr + 16);
                    __m256 _k63 = _mm256_loadu_ps(kptr + 24);
                    __m256 _k64 = _mm256_loadu_ps(kptr + 32);
                    __m256 _k65 = _mm256_loadu_ps(kptr + 40);
                    __m256 _k66 = _mm256_loadu_ps(kptr + 48);
                    __m256 _k67 = _mm256_loadu_ps(kptr + 56);

                    kptr += 64;

                    _sum0 = _mm256_comp_fmadd_ps(_r200, _k60, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r201, _k61, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r202, _k62, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r203, _k63, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r204, _k64, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r205, _k65, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r206, _k66, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r207, _k67, _sum1);

                    __m256 _r210 = _mm256_broadcast_ss(r2 + 8);
                    __m256 _r211 = _mm256_broadcast_ss(r2 + 9);
                    __m256 _r212 = _mm256_broadcast_ss(r2 + 10);
                    __m256 _r213 = _mm256_broadcast_ss(r2 + 11);
                    __m256 _r214 = _mm256_broadcast_ss(r2 + 12);
                    __m256 _r215 = _mm256_broadcast_ss(r2 + 13);
                    __m256 _r216 = _mm256_broadcast_ss(r2 + 14);
                    __m256 _r217 = _mm256_broadcast_ss(r2 + 15);

                    __m256 _k70 = _mm256_loadu_ps(kptr);
                    __m256 _k71 = _mm256_loadu_ps(kptr + 8);
                    __m256 _k72 = _mm256_loadu_ps(kptr + 16);
                    __m256 _k73 = _mm256_loadu_ps(kptr + 24);
                    __m256 _k74 = _mm256_loadu_ps(kptr + 32);
                    __m256 _k75 = _mm256_loadu_ps(kptr + 40);
                    __m256 _k76 = _mm256_loadu_ps(kptr + 48);
                    __m256 _k77 = _mm256_loadu_ps(kptr + 56);

                    kptr += 64;

                    _sum0 = _mm256_comp_fmadd_ps(_r210, _k70, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r211, _k71, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r212, _k72, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r213, _k73, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r214, _k74, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r215, _k75, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r216, _k76, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r217, _k77, _sum1);

                    __m256 _r220 = _mm256_broadcast_ss(r2 + 16);
                    __m256 _r221 = _mm256_broadcast_ss(r2 + 17);
                    __m256 _r222 = _mm256_broadcast_ss(r2 + 18);
                    __m256 _r223 = _mm256_broadcast_ss(r2 + 19);
                    __m256 _r224 = _mm256_broadcast_ss(r2 + 20);
                    __m256 _r225 = _mm256_broadcast_ss(r2 + 21);
                    __m256 _r226 = _mm256_broadcast_ss(r2 + 22);
                    __m256 _r227 = _mm256_broadcast_ss(r2 + 23);

                    __m256 _k80 = _mm256_loadu_ps(kptr);
                    __m256 _k81 = _mm256_loadu_ps(kptr + 8);
                    __m256 _k82 = _mm256_loadu_ps(kptr + 16);
                    __m256 _k83 = _mm256_loadu_ps(kptr + 24);
                    __m256 _k84 = _mm256_loadu_ps(kptr + 32);
                    __m256 _k85 = _mm256_loadu_ps(kptr + 40);
                    __m256 _k86 = _mm256_loadu_ps(kptr + 48);
                    __m256 _k87 = _mm256_loadu_ps(kptr + 56);

                    _sum0 = _mm256_comp_fmadd_ps(_r220, _k80, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r221, _k81, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r222, _k82, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r223, _k83, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r224, _k84, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r225, _k85, _sum1);
                    _sum0 = _mm256_comp_fmadd_ps(_r226, _k86, _sum0);
                    _sum1 = _mm256_comp_fmadd_ps(_r227, _k87, _sum1);

                    kptr -= 64 * 8;

                    _sum0 = _mm256_add_ps(_sum0, _sum1);

                    _mm256_storeu_ps(outptr, _sum0);

                    r0 += 8;
                    r1 += 8;
                    r2 += 8;
                    outptr += 8;
                }

                r0 += 16;
                r1 += 16;
                r2 += 16;
            }
        }
    }
}

// Transforms each 3x3 kernel to its 8x8 Winograd F(6,3) domain tile
// (kernel_tm = G k G^T using the ktm matrix below), then interleaves the result
// into the pack8 layout consumed by the winograd compute kernel:
// src = 64-inch-outch, dst = 8b-8a-inch/8a-64-outch/8b.
static void conv3x3s1_winograd64_transform_kernel_pack8_avx(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch)
{
    // winograd63 transform kernel
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : tmp = ktm * k (8x3)
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v : kernel_tm0 = tmp * ktm^T (8x8)
            for (int j = 0; j < 8; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 64-inch-outch
    // dst = 8b-8a-inch/8a-64-outch/8b;
    kernel_tm_pack8.create(inch / 8, 64, outch / 8, (size_t)4u * 64, 64);

    int q = 0;
    for (; q + 7 < outch; q += 8)
    {
        const Mat k0 = kernel_tm.channel(q);
        const Mat k1 = kernel_tm.channel(q + 1);
        const Mat k2 = kernel_tm.channel(q + 2);
        const Mat k3 = kernel_tm.channel(q + 3);
        const Mat k4 = kernel_tm.channel(q + 4);
        const Mat k5 = kernel_tm.channel(q + 5);
        const Mat k6 = kernel_tm.channel(q + 6);
        const Mat k7 = kernel_tm.channel(q + 7);

        Mat g0 = kernel_tm_pack8.channel(q / 8);

        for (int k = 0; k < 64; k++)
        {
            float* g00 = g0.row(k);

            for (int p = 0; p + 7 < inch; p += 8)
            {
                const float* k00 = k0.row(p);
                const float* k01 = k0.row(p + 1);
                const float* k02 = k0.row(p + 2);
                const float* k03 = k0.row(p + 3);
                const float* k04 = k0.row(p + 4);
                const float* k05 = k0.row(p + 5);
                const float* k06 = k0.row(p + 6);
                const float* k07 = k0.row(p + 7);

                const float* k10 = k1.row(p);
                const float* k11 = k1.row(p + 1);
                const float* k12 = k1.row(p + 2);
                const float* k13 = k1.row(p + 3);
                const float* k14 = k1.row(p + 4);
                const float* k15 = k1.row(p + 5);
                const float* k16 = k1.row(p + 6);
                const float* k17 = k1.row(p + 7);

                const float* k20 = k2.row(p);
                const float* k21 = k2.row(p + 1);
                const float* k22 = k2.row(p + 2);
                const float* k23 = k2.row(p + 3);
                const float* k24 = k2.row(p + 4);
                const float* k25 = k2.row(p + 5);
                const float* k26 = k2.row(p + 6);
                const float* k27 = k2.row(p + 7);

                const float* k30 = k3.row(p);
                const float* k31 = k3.row(p + 1);
                const float* k32 = k3.row(p + 2);
                const float* k33 = k3.row(p + 3);
                const float* k34 = k3.row(p + 4);
                const float* k35 = k3.row(p + 5);
                const float* k36 = k3.row(p + 6);
                const float* k37 = k3.row(p + 7);

                const float* k40 = k4.row(p);
                const float* k41 = k4.row(p + 1);
                const float* k42 = k4.row(p + 2);
                const float* k43 = k4.row(p + 3);
                const float* k44 = k4.row(p + 4);
                const float* k45 = k4.row(p + 5);
                const float* k46 = k4.row(p + 6);
                const float* k47 = k4.row(p + 7);

                const float* k50 = k5.row(p);
                const float* k51 = k5.row(p + 1);
                const float* k52 = k5.row(p + 2);
                const float* k53 = k5.row(p + 3);
                const float* k54 = k5.row(p + 4);
                const float* k55 = k5.row(p + 5);
                const float* k56 = k5.row(p + 6);
                const float* k57 = k5.row(p + 7);

                const float* k60 = k6.row(p);
                const float* k61 = k6.row(p + 1);
                const float* k62 = k6.row(p + 2);
                const float* k63 = k6.row(p + 3);
                const float* k64 = k6.row(p + 4);
                const float* k65 = k6.row(p + 5);
                const float* k66 = k6.row(p + 6);
                const float* k67 = k6.row(p + 7);

                const float* k70 = k7.row(p);
                const float* k71 = k7.row(p + 1);
                const float* k72 = k7.row(p + 2);
                const float* k73 = k7.row(p + 3);
                const float* k74 = k7.row(p + 4);
                const float* k75 = k7.row(p + 5);
                const float* k76 = k7.row(p + 6);
                const float* k77 = k7.row(p + 7);

                g00[0] = k00[k];
                g00[1] = k10[k];
                g00[2] = k20[k];
                g00[3] = k30[k];
                g00[4] = k40[k];
                g00[5] = k50[k];
g00[6] = k60[k]; g00[7] = k70[k]; g00[8] = k01[k]; g00[9] = k11[k]; g00[10] = k21[k]; g00[11] = k31[k]; g00[12] = k41[k]; g00[13] = k51[k]; g00[14] = k61[k]; g00[15] = k71[k]; g00[16] = k02[k]; g00[17] = k12[k]; g00[18] = k22[k]; g00[19] = k32[k]; g00[20] = k42[k]; g00[21] = k52[k]; g00[22] = k62[k]; g00[23] = k72[k]; g00[24] = k03[k]; g00[25] = k13[k]; g00[26] = k23[k]; g00[27] = k33[k]; g00[28] = k43[k]; g00[29] = k53[k]; g00[30] = k63[k]; g00[31] = k73[k]; g00[32] = k04[k]; g00[33] = k14[k]; g00[34] = k24[k]; g00[35] = k34[k]; g00[36] = k44[k]; g00[37] = k54[k]; g00[38] = k64[k]; g00[39] = k74[k]; g00[40] = k05[k]; g00[41] = k15[k]; g00[42] = k25[k]; g00[43] = k35[k]; g00[44] = k45[k]; g00[45] = k55[k]; g00[46] = k65[k]; g00[47] = k75[k]; g00[48] = k06[k]; g00[49] = k16[k]; g00[50] = k26[k]; g00[51] = k36[k]; g00[52] = k46[k]; g00[53] = k56[k]; g00[54] = k66[k]; g00[55] = k76[k]; g00[56] = k07[k]; g00[57] = k17[k]; g00[58] = k27[k]; g00[59] = k37[k]; g00[60] = k47[k]; g00[61] = k57[k]; g00[62] = k67[k]; g00[63] = k77[k]; g00 += 64; } } } } static void conv3x3s1_winograd64_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 
0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8][8]; // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { const float* r0 = img0.row(i * 6) + (j * 6) * 8; for (int m = 0; m < 8; m++) { __m256 _r00 = _mm256_loadu_ps(r0); __m256 _r01 = _mm256_loadu_ps(r0 + 8); __m256 _r02 = _mm256_loadu_ps(r0 + 16); __m256 _r03 = _mm256_loadu_ps(r0 + 24); __m256 _r04 = _mm256_loadu_ps(r0 + 32); __m256 _r05 = _mm256_loadu_ps(r0 + 40); __m256 _r06 = _mm256_loadu_ps(r0 + 48); __m256 _r07 = _mm256_loadu_ps(r0 + 56); __m256 _tmp0m = _mm256_fmadd_1_ps(_mm256_sub_ps(_r00, _r06), _mm256_sub_ps(_r04, _r02), 5.25f); __m256 _tmp7m = _mm256_fmadd_1_ps(_mm256_sub_ps(_r07, _r01), _mm256_sub_ps(_r03, _r05), 5.25f); _mm256_storeu_ps(tmp[0][m], _tmp0m); _mm256_storeu_ps(tmp[7][m], _tmp7m); // tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; // tmp[7][m] = r0[7] 
- r0[1] + (r0[3] - r0[5]) * 5.25; __m256 _tmp12a = _mm256_fmrsub_1_ps(_mm256_add_ps(_r02, _r06), _r04, 4.25f); __m256 _tmp12b = _mm256_fmrsub_1_ps(_mm256_add_ps(_r01, _r05), _r03, 4.25f); // float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); // float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); __m256 _tmp1m = _mm256_add_ps(_tmp12a, _tmp12b); __m256 _tmp2m = _mm256_sub_ps(_tmp12a, _tmp12b); _mm256_storeu_ps(tmp[1][m], _tmp1m); _mm256_storeu_ps(tmp[2][m], _tmp2m); // tmp[1][m] = tmp12a + tmp12b; // tmp[2][m] = tmp12a - tmp12b; __m256 _tmp34a = _mm256_fmrsub_1_ps(_mm256_fmadd_1_ps(_r06, _r02, 0.25f), _r04, 1.25f); __m256 _tmp34b = _mm256_fmadd_1_ps(_mm256_fmrsub_1_ps(_mm256_mul_ps(_r01, _mm256_set1_ps(0.5f)), _r03, 2.5f), _r05, 2.f); // float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); // float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); __m256 _tmp3m = _mm256_add_ps(_tmp34a, _tmp34b); __m256 _tmp4m = _mm256_sub_ps(_tmp34a, _tmp34b); _mm256_storeu_ps(tmp[3][m], _tmp3m); _mm256_storeu_ps(tmp[4][m], _tmp4m); // tmp[3][m] = tmp34a + tmp34b; // tmp[4][m] = tmp34a - tmp34b; __m256 _tmp56a = _mm256_fmadd_1_ps(_r06, _mm256_fmrsub_1_ps(_r02, _r04, 1.25f), 4.f); __m256 _tmp56b = _mm256_fmadd_1_ps(_mm256_fmrsub_1_ps(_mm256_mul_ps(_r01, _mm256_set1_ps(2.f)), _r03, 2.5f), _r05, 0.5f); // float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); // float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); __m256 _tmp5m = _mm256_add_ps(_tmp56a, _tmp56b); __m256 _tmp6m = _mm256_sub_ps(_tmp56a, _tmp56b); _mm256_storeu_ps(tmp[5][m], _tmp5m); _mm256_storeu_ps(tmp[6][m], _tmp6m); // tmp[5][m] = tmp56a + tmp56b; // tmp[6][m] = tmp56a - tmp56b; r0 += w * 8; } float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 8 + j) * 8; float* r0_tm_1 = r0_tm_0 + tiles * 8; float* r0_tm_2 = r0_tm_0 + tiles * 16; float* r0_tm_3 = r0_tm_0 + tiles * 24; float* r0_tm_4 = r0_tm_0 + tiles * 32; float* r0_tm_5 = r0_tm_0 + tiles * 40; float* r0_tm_6 = r0_tm_0 + tiles * 48; float* r0_tm_7 = r0_tm_0 + tiles * 56; for (int 
m = 0; m < 8; m++) { __m256 _tmp00 = _mm256_loadu_ps(tmp[m][0]); __m256 _tmp01 = _mm256_loadu_ps(tmp[m][1]); __m256 _tmp02 = _mm256_loadu_ps(tmp[m][2]); __m256 _tmp03 = _mm256_loadu_ps(tmp[m][3]); __m256 _tmp04 = _mm256_loadu_ps(tmp[m][4]); __m256 _tmp05 = _mm256_loadu_ps(tmp[m][5]); __m256 _tmp06 = _mm256_loadu_ps(tmp[m][6]); __m256 _tmp07 = _mm256_loadu_ps(tmp[m][7]); __m256 _r0tm0 = _mm256_fmadd_1_ps(_mm256_sub_ps(_tmp00, _tmp06), _mm256_sub_ps(_tmp04, _tmp02), 5.25f); __m256 _r0tm7 = _mm256_fmadd_1_ps(_mm256_sub_ps(_tmp07, _tmp01), _mm256_sub_ps(_tmp03, _tmp05), 5.25f); // r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; // r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; __m256 _tmp12a = _mm256_fmrsub_1_ps(_mm256_add_ps(_tmp02, _tmp06), _tmp04, 4.25f); __m256 _tmp12b = _mm256_fmrsub_1_ps(_mm256_add_ps(_tmp01, _tmp05), _tmp03, 4.25f); // float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); // float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25); __m256 _r0tm1 = _mm256_add_ps(_tmp12a, _tmp12b); __m256 _r0tm2 = _mm256_sub_ps(_tmp12a, _tmp12b); // r0_tm[1] = tmp12a + tmp12b; // r0_tm[2] = tmp12a - tmp12b; __m256 _tmp34a = _mm256_fmrsub_1_ps(_mm256_fmadd_1_ps(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f); __m256 _tmp34b = _mm256_fmadd_1_ps(_mm256_fmrsub_1_ps(_mm256_mul_ps(_tmp01, _mm256_set1_ps(0.5f)), _tmp03, 2.5f), _tmp05, 2.f); // float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); // float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); __m256 _r0tm3 = _mm256_add_ps(_tmp34a, _tmp34b); __m256 _r0tm4 = _mm256_sub_ps(_tmp34a, _tmp34b); // r0_tm[3] = tmp34a + tmp34b; // r0_tm[4] = tmp34a - tmp34b; __m256 _tmp56a = _mm256_fmadd_1_ps(_tmp06, _mm256_fmrsub_1_ps(_tmp02, _tmp04, 1.25f), 4.f); __m256 _tmp56b = _mm256_fmadd_1_ps(_mm256_fmrsub_1_ps(_mm256_mul_ps(_tmp01, _mm256_set1_ps(2.f)), _tmp03, 2.5f), _tmp05, 0.5f); // float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); // float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); 
__m256 _r0tm5 = _mm256_add_ps(_tmp56a, _tmp56b); __m256 _r0tm6 = _mm256_sub_ps(_tmp56a, _tmp56b); // r0_tm[5] = tmp56a + tmp56b; // r0_tm[6] = tmp56a - tmp56b; _mm256_storeu_ps(r0_tm_0, _r0tm0); _mm256_storeu_ps(r0_tm_1, _r0tm1); _mm256_storeu_ps(r0_tm_2, _r0tm2); _mm256_storeu_ps(r0_tm_3, _r0tm3); _mm256_storeu_ps(r0_tm_4, _r0tm4); _mm256_storeu_ps(r0_tm_5, _r0tm5); _mm256_storeu_ps(r0_tm_6, _r0tm6); _mm256_storeu_ps(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 64; r0_tm_1 += tiles * 64; r0_tm_2 += tiles * 64; r0_tm_3 += tiles * 64; r0_tm_4 += tiles * 64; r0_tm_5 += tiles * 64; r0_tm_6 += tiles * 64; r0_tm_7 += tiles * 64; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; Mat bottom_blob_tm2; if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 11 < tiles; i += 12) { float* tm2p = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(r0); __m256 _r1 = _mm256_loadu_ps(r0 + 8); __m256 _r2 = _mm256_loadu_ps(r0 + 16); __m256 _r3 = 
_mm256_loadu_ps(r0 + 24); __m256 _r4 = _mm256_loadu_ps(r0 + 32); __m256 _r5 = _mm256_loadu_ps(r0 + 40); __m256 _r6 = _mm256_loadu_ps(r0 + 48); __m256 _r7 = _mm256_loadu_ps(r0 + 56); __m256 _r8 = _mm256_loadu_ps(r0 + 64); __m256 _r9 = _mm256_loadu_ps(r0 + 72); __m256 _r10 = _mm256_loadu_ps(r0 + 80); __m256 _r11 = _mm256_loadu_ps(r0 + 88); _mm256_storeu_ps(tm2p, _r0); _mm256_storeu_ps(tm2p + 8, _r1); _mm256_storeu_ps(tm2p + 16, _r2); _mm256_storeu_ps(tm2p + 24, _r3); _mm256_storeu_ps(tm2p + 32, _r4); _mm256_storeu_ps(tm2p + 40, _r5); _mm256_storeu_ps(tm2p + 48, _r6); _mm256_storeu_ps(tm2p + 56, _r7); _mm256_storeu_ps(tm2p + 64, _r8); _mm256_storeu_ps(tm2p + 72, _r9); _mm256_storeu_ps(tm2p + 80, _r10); _mm256_storeu_ps(tm2p + 88, _r11); tm2p += 96; r0 += bottom_blob_tm.cstep * 8; } } for (; i + 7 < tiles; i += 8) { float* tm2p = tm2.row(i / 12 + (i % 12) / 8); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(r0); __m256 _r1 = _mm256_loadu_ps(r0 + 8); _mm256_storeu_ps(tm2p, _r0); _mm256_storeu_ps(tm2p + 8, _r1); __m256 _r2 = _mm256_loadu_ps(r0 + 16); __m256 _r3 = _mm256_loadu_ps(r0 + 24); _mm256_storeu_ps(tm2p + 16, _r2); _mm256_storeu_ps(tm2p + 24, _r3); __m256 _r4 = _mm256_loadu_ps(r0 + 32); __m256 _r5 = _mm256_loadu_ps(r0 + 40); _mm256_storeu_ps(tm2p + 32, _r4); _mm256_storeu_ps(tm2p + 40, _r5); __m256 _r6 = _mm256_loadu_ps(r0 + 48); __m256 _r7 = _mm256_loadu_ps(r0 + 56); _mm256_storeu_ps(tm2p + 48, _r6); _mm256_storeu_ps(tm2p + 56, _r7); tm2p += 64; r0 += bottom_blob_tm.cstep * 8; } } for (; i + 3 < tiles; i += 4) { float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(r0); __m256 _r1 = _mm256_loadu_ps(r0 + 8); _mm256_storeu_ps(tm2p, _r0); _mm256_storeu_ps(tm2p + 8, _r1); __m256 _r2 = _mm256_loadu_ps(r0 + 16); __m256 _r3 = _mm256_loadu_ps(r0 + 24); 
_mm256_storeu_ps(tm2p + 16, _r2); _mm256_storeu_ps(tm2p + 24, _r3); tm2p += 32; r0 += bottom_blob_tm.cstep * 8; } } for (; i + 1 < tiles; i += 2) { float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(r0); __m256 _r1 = _mm256_loadu_ps(r0 + 8); _mm256_storeu_ps(tm2p, _r0); _mm256_storeu_ps(tm2p + 8, _r1); tm2p += 16; r0 += bottom_blob_tm.cstep * 8; } } for (; i < tiles; i++) { float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(r0); _mm256_storeu_ps(tm2p, _r0); tm2p += 8; r0 += bottom_blob_tm.cstep * 8; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, elemsize, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k01 = kernel0_tm.row(r); int nn = inch; // inch always > 0 __m256 _sum0 = _mm256_set1_ps(0.f); __m256 _sum1 = _mm256_set1_ps(0.f); __m256 _sum2 = _mm256_set1_ps(0.f); __m256 _sum3 = _mm256_set1_ps(0.f); __m256 _sum4 = _mm256_set1_ps(0.f); __m256 _sum5 = _mm256_set1_ps(0.f); __m256 _sum6 = _mm256_set1_ps(0.f); __m256 _sum7 = _mm256_set1_ps(0.f); __m256 _sum8 = _mm256_set1_ps(0.f); __m256 _sum9 = _mm256_set1_ps(0.f); __m256 _sum10 = _mm256_set1_ps(0.f); __m256 _sum11 = _mm256_set1_ps(0.f); for (; nn > 0; nn--) { __m256 _k01 = _mm256_loadu_ps(k01); __m256 _r00 = _mm256_broadcast_ss(r0 + 0); __m256 _r01 = _mm256_broadcast_ss(r0 + 8); __m256 _r02 = _mm256_broadcast_ss(r0 + 16); __m256 _r03 = 
_mm256_broadcast_ss(r0 + 24); _sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3); __m256 _r04 = _mm256_broadcast_ss(r0 + 32); __m256 _r05 = _mm256_broadcast_ss(r0 + 40); __m256 _r06 = _mm256_broadcast_ss(r0 + 48); __m256 _r07 = _mm256_broadcast_ss(r0 + 56); _sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7); __m256 _r08 = _mm256_broadcast_ss(r0 + 64); __m256 _r09 = _mm256_broadcast_ss(r0 + 72); __m256 _r010 = _mm256_broadcast_ss(r0 + 80); __m256 _r011 = _mm256_broadcast_ss(r0 + 88); _sum8 = _mm256_comp_fmadd_ps(_k01, _r08, _sum8); _sum9 = _mm256_comp_fmadd_ps(_k01, _r09, _sum9); _sum10 = _mm256_comp_fmadd_ps(_k01, _r010, _sum10); _sum11 = _mm256_comp_fmadd_ps(_k01, _r011, _sum11); _k01 = _mm256_loadu_ps(k01 + 8); _r00 = _mm256_broadcast_ss(r0 + 1); _r01 = _mm256_broadcast_ss(r0 + 9); _r02 = _mm256_broadcast_ss(r0 + 17); _r03 = _mm256_broadcast_ss(r0 + 25); _sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 33); _r05 = _mm256_broadcast_ss(r0 + 41); _r06 = _mm256_broadcast_ss(r0 + 49); _r07 = _mm256_broadcast_ss(r0 + 57); _sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7); _r08 = _mm256_broadcast_ss(r0 + 65); _r09 = _mm256_broadcast_ss(r0 + 73); _r010 = _mm256_broadcast_ss(r0 + 81); _r011 = _mm256_broadcast_ss(r0 + 89); _sum8 = _mm256_comp_fmadd_ps(_k01, _r08, _sum8); _sum9 = _mm256_comp_fmadd_ps(_k01, _r09, _sum9); _sum10 = _mm256_comp_fmadd_ps(_k01, _r010, _sum10); 
_sum11 = _mm256_comp_fmadd_ps(_k01, _r011, _sum11); _k01 = _mm256_loadu_ps(k01 + 16); _r00 = _mm256_broadcast_ss(r0 + 2); _r01 = _mm256_broadcast_ss(r0 + 10); _r02 = _mm256_broadcast_ss(r0 + 18); _r03 = _mm256_broadcast_ss(r0 + 26); _sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 34); _r05 = _mm256_broadcast_ss(r0 + 42); _r06 = _mm256_broadcast_ss(r0 + 50); _r07 = _mm256_broadcast_ss(r0 + 58); _sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7); _r08 = _mm256_broadcast_ss(r0 + 66); _r09 = _mm256_broadcast_ss(r0 + 74); _r010 = _mm256_broadcast_ss(r0 + 82); _r011 = _mm256_broadcast_ss(r0 + 90); _sum8 = _mm256_comp_fmadd_ps(_k01, _r08, _sum8); _sum9 = _mm256_comp_fmadd_ps(_k01, _r09, _sum9); _sum10 = _mm256_comp_fmadd_ps(_k01, _r010, _sum10); _sum11 = _mm256_comp_fmadd_ps(_k01, _r011, _sum11); _k01 = _mm256_loadu_ps(k01 + 24); _r00 = _mm256_broadcast_ss(r0 + 3); _r01 = _mm256_broadcast_ss(r0 + 11); _r02 = _mm256_broadcast_ss(r0 + 19); _r03 = _mm256_broadcast_ss(r0 + 27); _sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 35); _r05 = _mm256_broadcast_ss(r0 + 43); _r06 = _mm256_broadcast_ss(r0 + 51); _r07 = _mm256_broadcast_ss(r0 + 59); _sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7); _r08 = _mm256_broadcast_ss(r0 + 67); _r09 = _mm256_broadcast_ss(r0 + 75); _r010 = _mm256_broadcast_ss(r0 + 83); _r011 = _mm256_broadcast_ss(r0 + 91); 
_sum8 = _mm256_comp_fmadd_ps(_k01, _r08, _sum8); _sum9 = _mm256_comp_fmadd_ps(_k01, _r09, _sum9); _sum10 = _mm256_comp_fmadd_ps(_k01, _r010, _sum10); _sum11 = _mm256_comp_fmadd_ps(_k01, _r011, _sum11); _k01 = _mm256_loadu_ps(k01 + 32); _r00 = _mm256_broadcast_ss(r0 + 4); _r01 = _mm256_broadcast_ss(r0 + 12); _r02 = _mm256_broadcast_ss(r0 + 20); _r03 = _mm256_broadcast_ss(r0 + 28); _sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 36); _r05 = _mm256_broadcast_ss(r0 + 44); _r06 = _mm256_broadcast_ss(r0 + 52); _r07 = _mm256_broadcast_ss(r0 + 60); _sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7); _r08 = _mm256_broadcast_ss(r0 + 68); _r09 = _mm256_broadcast_ss(r0 + 76); _r010 = _mm256_broadcast_ss(r0 + 84); _r011 = _mm256_broadcast_ss(r0 + 92); _sum8 = _mm256_comp_fmadd_ps(_k01, _r08, _sum8); _sum9 = _mm256_comp_fmadd_ps(_k01, _r09, _sum9); _sum10 = _mm256_comp_fmadd_ps(_k01, _r010, _sum10); _sum11 = _mm256_comp_fmadd_ps(_k01, _r011, _sum11); _k01 = _mm256_loadu_ps(k01 + 40); _r00 = _mm256_broadcast_ss(r0 + 5); _r01 = _mm256_broadcast_ss(r0 + 13); _r02 = _mm256_broadcast_ss(r0 + 21); _r03 = _mm256_broadcast_ss(r0 + 29); _sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 37); _r05 = _mm256_broadcast_ss(r0 + 45); _r06 = _mm256_broadcast_ss(r0 + 53); _r07 = _mm256_broadcast_ss(r0 + 61); _sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7); 
_r08 = _mm256_broadcast_ss(r0 + 69); _r09 = _mm256_broadcast_ss(r0 + 77); _r010 = _mm256_broadcast_ss(r0 + 85); _r011 = _mm256_broadcast_ss(r0 + 93); _sum8 = _mm256_comp_fmadd_ps(_k01, _r08, _sum8); _sum9 = _mm256_comp_fmadd_ps(_k01, _r09, _sum9); _sum10 = _mm256_comp_fmadd_ps(_k01, _r010, _sum10); _sum11 = _mm256_comp_fmadd_ps(_k01, _r011, _sum11); _k01 = _mm256_loadu_ps(k01 + 48); _r00 = _mm256_broadcast_ss(r0 + 6); _r01 = _mm256_broadcast_ss(r0 + 14); _r02 = _mm256_broadcast_ss(r0 + 22); _r03 = _mm256_broadcast_ss(r0 + 30); _sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 38); _r05 = _mm256_broadcast_ss(r0 + 46); _r06 = _mm256_broadcast_ss(r0 + 54); _r07 = _mm256_broadcast_ss(r0 + 62); _sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7); _r08 = _mm256_broadcast_ss(r0 + 70); _r09 = _mm256_broadcast_ss(r0 + 78); _r010 = _mm256_broadcast_ss(r0 + 86); _r011 = _mm256_broadcast_ss(r0 + 94); _sum8 = _mm256_comp_fmadd_ps(_k01, _r08, _sum8); _sum9 = _mm256_comp_fmadd_ps(_k01, _r09, _sum9); _sum10 = _mm256_comp_fmadd_ps(_k01, _r010, _sum10); _sum11 = _mm256_comp_fmadd_ps(_k01, _r011, _sum11); _k01 = _mm256_loadu_ps(k01 + 56); _r00 = _mm256_broadcast_ss(r0 + 7); _r01 = _mm256_broadcast_ss(r0 + 15); _r02 = _mm256_broadcast_ss(r0 + 23); _r03 = _mm256_broadcast_ss(r0 + 31); _sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 39); _r05 = _mm256_broadcast_ss(r0 + 47); _r06 = _mm256_broadcast_ss(r0 + 55); _r07 = _mm256_broadcast_ss(r0 + 63); _sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4); 
_sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7); _r08 = _mm256_broadcast_ss(r0 + 71); _r09 = _mm256_broadcast_ss(r0 + 79); _r010 = _mm256_broadcast_ss(r0 + 87); _r011 = _mm256_broadcast_ss(r0 + 95); _sum8 = _mm256_comp_fmadd_ps(_k01, _r08, _sum8); _sum9 = _mm256_comp_fmadd_ps(_k01, _r09, _sum9); _sum10 = _mm256_comp_fmadd_ps(_k01, _r010, _sum10); _sum11 = _mm256_comp_fmadd_ps(_k01, _r011, _sum11); k01 += 64; r0 += 96; } _mm256_storeu_ps(output0_tm, _sum0); _mm256_storeu_ps(output0_tm + 8, _sum1); _mm256_storeu_ps(output0_tm + 16, _sum2); _mm256_storeu_ps(output0_tm + 24, _sum3); _mm256_storeu_ps(output0_tm + 32, _sum4); _mm256_storeu_ps(output0_tm + 40, _sum5); _mm256_storeu_ps(output0_tm + 48, _sum6); _mm256_storeu_ps(output0_tm + 56, _sum7); _mm256_storeu_ps(output0_tm + 64, _sum8); _mm256_storeu_ps(output0_tm + 72, _sum9); _mm256_storeu_ps(output0_tm + 80, _sum10); _mm256_storeu_ps(output0_tm + 88, _sum11); output0_tm += 96; } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k01 = kernel0_tm.row(r); int nn = inch; // inch always > 0 __m256 _sum0 = _mm256_set1_ps(0.f); __m256 _sum1 = _mm256_set1_ps(0.f); __m256 _sum2 = _mm256_set1_ps(0.f); __m256 _sum3 = _mm256_set1_ps(0.f); __m256 _sum4 = _mm256_set1_ps(0.f); __m256 _sum5 = _mm256_set1_ps(0.f); __m256 _sum6 = _mm256_set1_ps(0.f); __m256 _sum7 = _mm256_set1_ps(0.f); for (; nn > 0; nn--) { __m256 _k01 = _mm256_loadu_ps(k01); __m256 _r00 = _mm256_broadcast_ss(r0 + 0); __m256 _r01 = _mm256_broadcast_ss(r0 + 8); __m256 _r02 = _mm256_broadcast_ss(r0 + 16); __m256 _r03 = _mm256_broadcast_ss(r0 + 24); _sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3); __m256 _r04 = _mm256_broadcast_ss(r0 + 32); __m256 _r05 = _mm256_broadcast_ss(r0 + 
40); __m256 _r06 = _mm256_broadcast_ss(r0 + 48); __m256 _r07 = _mm256_broadcast_ss(r0 + 56); _sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7); _k01 = _mm256_loadu_ps(k01 + 8); _r00 = _mm256_broadcast_ss(r0 + 1); _r01 = _mm256_broadcast_ss(r0 + 9); _r02 = _mm256_broadcast_ss(r0 + 17); _r03 = _mm256_broadcast_ss(r0 + 25); _sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 33); _r05 = _mm256_broadcast_ss(r0 + 41); _r06 = _mm256_broadcast_ss(r0 + 49); _r07 = _mm256_broadcast_ss(r0 + 57); _sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7); _k01 = _mm256_loadu_ps(k01 + 16); _r00 = _mm256_broadcast_ss(r0 + 2); _r01 = _mm256_broadcast_ss(r0 + 10); _r02 = _mm256_broadcast_ss(r0 + 18); _r03 = _mm256_broadcast_ss(r0 + 26); _sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 34); _r05 = _mm256_broadcast_ss(r0 + 42); _r06 = _mm256_broadcast_ss(r0 + 50); _r07 = _mm256_broadcast_ss(r0 + 58); _sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7); _k01 = _mm256_loadu_ps(k01 + 24); _r00 = _mm256_broadcast_ss(r0 + 3); _r01 = _mm256_broadcast_ss(r0 + 11); _r02 = _mm256_broadcast_ss(r0 + 19); _r03 = _mm256_broadcast_ss(r0 + 27); _sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, 
_sum1); _sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 35); _r05 = _mm256_broadcast_ss(r0 + 43); _r06 = _mm256_broadcast_ss(r0 + 51); _r07 = _mm256_broadcast_ss(r0 + 59); _sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7); _k01 = _mm256_loadu_ps(k01 + 32); _r00 = _mm256_broadcast_ss(r0 + 4); _r01 = _mm256_broadcast_ss(r0 + 12); _r02 = _mm256_broadcast_ss(r0 + 20); _r03 = _mm256_broadcast_ss(r0 + 28); _sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 36); _r05 = _mm256_broadcast_ss(r0 + 44); _r06 = _mm256_broadcast_ss(r0 + 52); _r07 = _mm256_broadcast_ss(r0 + 60); _sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7); _k01 = _mm256_loadu_ps(k01 + 40); _r00 = _mm256_broadcast_ss(r0 + 5); _r01 = _mm256_broadcast_ss(r0 + 13); _r02 = _mm256_broadcast_ss(r0 + 21); _r03 = _mm256_broadcast_ss(r0 + 29); _sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 37); _r05 = _mm256_broadcast_ss(r0 + 45); _r06 = _mm256_broadcast_ss(r0 + 53); _r07 = _mm256_broadcast_ss(r0 + 61); _sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7); _k01 = _mm256_loadu_ps(k01 + 48); _r00 = _mm256_broadcast_ss(r0 + 6); _r01 = _mm256_broadcast_ss(r0 + 14); 
_r02 = _mm256_broadcast_ss(r0 + 22); _r03 = _mm256_broadcast_ss(r0 + 30); _sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 38); _r05 = _mm256_broadcast_ss(r0 + 46); _r06 = _mm256_broadcast_ss(r0 + 54); _r07 = _mm256_broadcast_ss(r0 + 62); _sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7); _k01 = _mm256_loadu_ps(k01 + 56); _r00 = _mm256_broadcast_ss(r0 + 7); _r01 = _mm256_broadcast_ss(r0 + 15); _r02 = _mm256_broadcast_ss(r0 + 23); _r03 = _mm256_broadcast_ss(r0 + 31); _sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3); _r04 = _mm256_broadcast_ss(r0 + 39); _r05 = _mm256_broadcast_ss(r0 + 47); _r06 = _mm256_broadcast_ss(r0 + 55); _r07 = _mm256_broadcast_ss(r0 + 63); _sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4); _sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5); _sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6); _sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7); k01 += 64; r0 += 64; } _mm256_storeu_ps(output0_tm, _sum0); _mm256_storeu_ps(output0_tm + 8, _sum1); _mm256_storeu_ps(output0_tm + 16, _sum2); _mm256_storeu_ps(output0_tm + 24, _sum3); _mm256_storeu_ps(output0_tm + 32, _sum4); _mm256_storeu_ps(output0_tm + 40, _sum5); _mm256_storeu_ps(output0_tm + 48, _sum6); _mm256_storeu_ps(output0_tm + 56, _sum7); output0_tm += 64; } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* k01 = kernel0_tm.row(r); int nn = inch; // inch always > 0 __m256 _sum0 = _mm256_set1_ps(0.f); __m256 _sum1 = _mm256_set1_ps(0.f); __m256 _sum2 = _mm256_set1_ps(0.f); __m256 
_sum3 = _mm256_set1_ps(0.f); for (; nn > 0; nn--) { __m256 _k01 = _mm256_loadu_ps(k01); __m256 _r00 = _mm256_broadcast_ss(r0 + 0); __m256 _r01 = _mm256_broadcast_ss(r0 + 8); __m256 _r02 = _mm256_broadcast_ss(r0 + 16); __m256 _r03 = _mm256_broadcast_ss(r0 + 24); _sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3); _k01 = _mm256_loadu_ps(k01 + 8); _r00 = _mm256_broadcast_ss(r0 + 1); _r01 = _mm256_broadcast_ss(r0 + 9); _r02 = _mm256_broadcast_ss(r0 + 17); _r03 = _mm256_broadcast_ss(r0 + 25); _sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3); _k01 = _mm256_loadu_ps(k01 + 16); _r00 = _mm256_broadcast_ss(r0 + 2); _r01 = _mm256_broadcast_ss(r0 + 10); _r02 = _mm256_broadcast_ss(r0 + 18); _r03 = _mm256_broadcast_ss(r0 + 26); _sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3); _k01 = _mm256_loadu_ps(k01 + 24); _r00 = _mm256_broadcast_ss(r0 + 3); _r01 = _mm256_broadcast_ss(r0 + 11); _r02 = _mm256_broadcast_ss(r0 + 19); _r03 = _mm256_broadcast_ss(r0 + 27); _sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3); _k01 = _mm256_loadu_ps(k01 + 32); _r00 = _mm256_broadcast_ss(r0 + 4); _r01 = _mm256_broadcast_ss(r0 + 12); _r02 = _mm256_broadcast_ss(r0 + 20); _r03 = _mm256_broadcast_ss(r0 + 28); _sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3); _k01 = _mm256_loadu_ps(k01 + 40); 
_r00 = _mm256_broadcast_ss(r0 + 5); _r01 = _mm256_broadcast_ss(r0 + 13); _r02 = _mm256_broadcast_ss(r0 + 21); _r03 = _mm256_broadcast_ss(r0 + 29); _sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3); _k01 = _mm256_loadu_ps(k01 + 48); _r00 = _mm256_broadcast_ss(r0 + 6); _r01 = _mm256_broadcast_ss(r0 + 14); _r02 = _mm256_broadcast_ss(r0 + 22); _r03 = _mm256_broadcast_ss(r0 + 30); _sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3); _k01 = _mm256_loadu_ps(k01 + 56); _r00 = _mm256_broadcast_ss(r0 + 7); _r01 = _mm256_broadcast_ss(r0 + 15); _r02 = _mm256_broadcast_ss(r0 + 23); _r03 = _mm256_broadcast_ss(r0 + 31); _sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3); k01 += 64; r0 += 32; } _mm256_storeu_ps(output0_tm, _sum0); _mm256_storeu_ps(output0_tm + 8, _sum1); _mm256_storeu_ps(output0_tm + 16, _sum2); _mm256_storeu_ps(output0_tm + 24, _sum3); output0_tm += 32; } for (; i + 1 < tiles; i += 2) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* k01 = kernel0_tm.row(r); int nn = inch; // inch always > 0 __m256 _sum0 = _mm256_set1_ps(0.f); __m256 _sum1 = _mm256_set1_ps(0.f); for (; nn > 0; nn--) { __m256 _k01 = _mm256_loadu_ps(k01); __m256 _r0 = _mm256_broadcast_ss(r0); __m256 _r01 = _mm256_broadcast_ss(r0 + 8); _sum0 = _mm256_comp_fmadd_ps(_k01, _r0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _k01 = _mm256_loadu_ps(k01 + 8); _r0 = _mm256_broadcast_ss(r0 + 1); _r01 = _mm256_broadcast_ss(r0 + 9); _sum0 = _mm256_comp_fmadd_ps(_k01, _r0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, 
_sum1); _k01 = _mm256_loadu_ps(k01 + 16); _r0 = _mm256_broadcast_ss(r0 + 2); _r01 = _mm256_broadcast_ss(r0 + 10); _sum0 = _mm256_comp_fmadd_ps(_k01, _r0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _k01 = _mm256_loadu_ps(k01 + 24); _r0 = _mm256_broadcast_ss(r0 + 3); _r01 = _mm256_broadcast_ss(r0 + 11); _sum0 = _mm256_comp_fmadd_ps(_k01, _r0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _k01 = _mm256_loadu_ps(k01 + 32); _r0 = _mm256_broadcast_ss(r0 + 4); _r01 = _mm256_broadcast_ss(r0 + 12); _sum0 = _mm256_comp_fmadd_ps(_k01, _r0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _k01 = _mm256_loadu_ps(k01 + 40); _r0 = _mm256_broadcast_ss(r0 + 5); _r01 = _mm256_broadcast_ss(r0 + 13); _sum0 = _mm256_comp_fmadd_ps(_k01, _r0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _k01 = _mm256_loadu_ps(k01 + 48); _r0 = _mm256_broadcast_ss(r0 + 6); _r01 = _mm256_broadcast_ss(r0 + 14); _sum0 = _mm256_comp_fmadd_ps(_k01, _r0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _k01 = _mm256_loadu_ps(k01 + 56); _r0 = _mm256_broadcast_ss(r0 + 7); _r01 = _mm256_broadcast_ss(r0 + 15); _sum0 = _mm256_comp_fmadd_ps(_k01, _r0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); k01 += 64; r0 += 16; } _mm256_storeu_ps(output0_tm, _sum0); _mm256_storeu_ps(output0_tm + 8, _sum1); output0_tm += 16; } for (; i < tiles; i++) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* k01 = kernel0_tm.row(r); int nn = inch; // inch always > 0 __m256 _sum0 = _mm256_set1_ps(0.f); for (; nn > 0; nn--) { __m256 _k01 = _mm256_loadu_ps(k01); __m256 _r0 = _mm256_broadcast_ss(r0); __m256 _mul0 = _mm256_mul_ps(_k01, _r0); _k01 = _mm256_loadu_ps(k01 + 8); _r0 = _mm256_broadcast_ss(r0 + 1); __m256 _mul1 = _mm256_mul_ps(_k01, _r0); _k01 = _mm256_loadu_ps(k01 + 16); _r0 = _mm256_broadcast_ss(r0 + 2); __m256 _mul2 = _mm256_mul_ps(_k01, _r0); __m256 _add01 = _mm256_add_ps(_mul0, _mul1); _k01 = 
_mm256_loadu_ps(k01 + 24); _r0 = _mm256_broadcast_ss(r0 + 3); __m256 _mul3 = _mm256_mul_ps(_k01, _r0); __m256 _add23 = _mm256_add_ps(_mul2, _mul3); __m256 _add0123 = _mm256_add_ps(_add01, _add23); _sum0 = _mm256_add_ps(_sum0, _add0123); _k01 = _mm256_loadu_ps(k01 + 32); _r0 = _mm256_broadcast_ss(r0 + 4); __m256 _mul4 = _mm256_mul_ps(_k01, _r0); _k01 = _mm256_loadu_ps(k01 + 40); _r0 = _mm256_broadcast_ss(r0 + 5); __m256 _mul5 = _mm256_mul_ps(_k01, _r0); _k01 = _mm256_loadu_ps(k01 + 48); _r0 = _mm256_broadcast_ss(r0 + 6); __m256 _mul6 = _mm256_mul_ps(_k01, _r0); __m256 _add45 = _mm256_add_ps(_mul4, _mul5); _k01 = _mm256_loadu_ps(k01 + 56); _r0 = _mm256_broadcast_ss(r0 + 7); __m256 _mul7 = _mm256_mul_ps(_k01, _r0); __m256 _add67 = _mm256_add_ps(_mul6, _mul7); __m256 _add4567 = _mm256_add_ps(_add45, _add67); _sum0 = _mm256_add_ps(_sum0, _add4567); k01 += 64; r0 += 8; } _mm256_storeu_ps(output0_tm, _sum0); output0_tm += 8; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for 
num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f); float tmp[6][8][8]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, elemsize, elempack); const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 8 + j) * 8; const float* output0_tm_1 = output0_tm_0 + tiles * 8; const float* output0_tm_2 = output0_tm_0 + tiles * 16; const float* output0_tm_3 = output0_tm_0 + tiles * 24; const float* output0_tm_4 = output0_tm_0 + tiles * 32; const float* output0_tm_5 = output0_tm_0 + tiles * 40; const float* output0_tm_6 = output0_tm_0 + tiles * 48; const float* output0_tm_7 = output0_tm_0 + tiles * 56; float* output0 = out0.row(i * 6) + (j * 6) * 8; // TODO neon optimize for (int m = 0; m < 8; m++) { __m256 _out0tm0 = _mm256_loadu_ps(output0_tm_0); __m256 _out0tm1 = _mm256_loadu_ps(output0_tm_1); __m256 _out0tm2 = _mm256_loadu_ps(output0_tm_2); __m256 _out0tm3 = _mm256_loadu_ps(output0_tm_3); __m256 _out0tm4 = _mm256_loadu_ps(output0_tm_4); __m256 _out0tm5 = _mm256_loadu_ps(output0_tm_5); __m256 _out0tm6 = _mm256_loadu_ps(output0_tm_6); __m256 _out0tm7 = _mm256_loadu_ps(output0_tm_7); __m256 _tmp024a = _mm256_add_ps(_out0tm1, _out0tm2); __m256 _tmp135a = _mm256_sub_ps(_out0tm1, _out0tm2); // float tmp024a = output0_tm[1] + output0_tm[2]; // float tmp135a = output0_tm[1] - output0_tm[2]; __m256 _tmp024b = _mm256_add_ps(_out0tm3, _out0tm4); __m256 _tmp135b = _mm256_sub_ps(_out0tm3, _out0tm4); // float tmp024b = output0_tm[3] + output0_tm[4]; // float tmp135b = output0_tm[3] - output0_tm[4]; __m256 _tmp024c = _mm256_add_ps(_out0tm5, _out0tm6); __m256 _tmp135c = _mm256_sub_ps(_out0tm5, _out0tm6); // float tmp024c = output0_tm[5] + output0_tm[6]; // float tmp135c = output0_tm[5] 
- output0_tm[6]; __m256 _tmp0m = _mm256_add_ps(_mm256_add_ps(_out0tm0, _tmp024a), _mm256_fmadd_1_ps(_tmp024b, _tmp024c, 32.f)); __m256 _tmp2m = _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f); __m256 _tmp4m = _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f); _mm256_storeu_ps(tmp[0][m], _tmp0m); _mm256_storeu_ps(tmp[2][m], _tmp2m); _mm256_storeu_ps(tmp[4][m], _tmp4m); // tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32; // tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; // tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; __m256 _tmp1m = _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f); __m256 _tmp3m = _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f); __m256 _tmp5m = _mm256_add_ps(_mm256_add_ps(_out0tm7, _tmp135a), _mm256_fmadd_1_ps(_tmp135c, _tmp135b, 32.f)); _mm256_storeu_ps(tmp[1][m], _tmp1m); _mm256_storeu_ps(tmp[3][m], _tmp3m); _mm256_storeu_ps(tmp[5][m], _tmp5m); // tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; // tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; // tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += tiles * 64; output0_tm_1 += tiles * 64; output0_tm_2 += tiles * 64; output0_tm_3 += tiles * 64; output0_tm_4 += tiles * 64; output0_tm_5 += tiles * 64; output0_tm_6 += tiles * 64; output0_tm_7 += tiles * 64; } for (int m = 0; m < 6; m++) { __m256 _tmp00 = _mm256_loadu_ps(tmp[m][0]); __m256 _tmp01 = _mm256_loadu_ps(tmp[m][1]); __m256 _tmp02 = _mm256_loadu_ps(tmp[m][2]); __m256 _tmp03 = _mm256_loadu_ps(tmp[m][3]); __m256 _tmp04 = _mm256_loadu_ps(tmp[m][4]); __m256 _tmp05 = _mm256_loadu_ps(tmp[m][5]); __m256 _tmp06 = _mm256_loadu_ps(tmp[m][6]); __m256 _tmp07 = _mm256_loadu_ps(tmp[m][7]); __m256 _tmp024a = _mm256_add_ps(_tmp01, _tmp02); __m256 _tmp135a = _mm256_sub_ps(_tmp01, _tmp02); // float tmp024a = tmp0[1] + tmp0[2]; // float tmp135a = tmp0[1] - tmp0[2]; __m256 _tmp024b = 
_mm256_add_ps(_tmp03, _tmp04); __m256 _tmp135b = _mm256_sub_ps(_tmp03, _tmp04); // float tmp024b = tmp0[3] + tmp0[4]; // float tmp135b = tmp0[3] - tmp0[4]; __m256 _tmp024c = _mm256_add_ps(_tmp05, _tmp06); __m256 _tmp135c = _mm256_sub_ps(_tmp05, _tmp06); // float tmp024c = tmp0[5] + tmp0[6]; // float tmp135c = tmp0[5] - tmp0[6]; __m256 _out00 = _mm256_add_ps(_bias0, _mm256_add_ps(_mm256_add_ps(_tmp00, _tmp024a), _mm256_fmadd_1_ps(_tmp024b, _tmp024c, 32.f))); __m256 _out02 = _mm256_add_ps(_bias0, _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f)); __m256 _out04 = _mm256_add_ps(_bias0, _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f)); _mm256_storeu_ps(output0, _out00); _mm256_storeu_ps(output0 + 16, _out02); _mm256_storeu_ps(output0 + 32, _out04); // output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; // output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; // output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; __m256 _out01 = _mm256_add_ps(_bias0, _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f)); __m256 _out03 = _mm256_add_ps(_bias0, _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f)); __m256 _out05 = _mm256_add_ps(_bias0, _mm256_add_ps(_mm256_add_ps(_tmp07, _tmp135a), _mm256_fmadd_1_ps(_tmp135c, _tmp135b, 32.f))); _mm256_storeu_ps(output0 + 8, _out01); _mm256_storeu_ps(output0 + 24, _out03); _mm256_storeu_ps(output0 + 40, _out05); // output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; // output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; // output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw * 8; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
/* ==== file: ZKBpp_bool.c ==== */
/*
 ============================================================================
 Name        : ZKBpp_bool.c
 Author      : ANONYMOUS - based on Sobuno's ZKBoo v0.1
 Version     : 1.0
 Description : MPC BITDEC and SHA-256 evaluation using ZKBpp.
               Implements a 3-party "MPC in the head" prover: the secret is
               arithmetically shared, bit-decomposed into XOR shares
               (mpc_bitdec), hashed with an MPC SHA-256 (mpc_sha256), and the
               resulting views are committed and opened ZKB++-style.
 ============================================================================
 */

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>

/* NOTE(review): "shared.h" is presumed to provide View, a, b, z, zz,
 * RIGHTROTATE, GETBIT, SETBIT, k[] (SHA-256 round constants), hA[] (SHA-256
 * IV used by mpc_sha256), getRandom1, getRandom32, getAllRandomness,
 * bits2int, outputSHA, H, H3, init_EVP/cleanup_EVP, openmp_thread_setup/
 * cleanup, RAND_bytes and SHA256_DIGEST_LENGTH (OpenSSL) — confirm. */
#include "shared.h"
#include "omp.h"

/* ============================================================================================================
 ============================================================================================================ */

/* SHA-256 "choose" function: selects f where e is 1, g where e is 0. */
#define CH(e,f,g) ((e & f) ^ ((~e) & g))

/* Timing accumulators (milliseconds), filled in by main(). */
int totalRandom = 0;
int totalSha = 0;
int totalSS = 0;
int totalHash = 0;

/* Number of parallel repetitions of the proof (soundness parameter). */
int NUM_ROUNDS = 219;

/* Write the 32-bit value n as 32 bytes, one bit per byte, MSB first
 * (out[0] holds bit 31, out[31] holds bit 0). */
void int2bin(uint32_t n, char* out) {
    for(int j=31; j>=0; j--) {
        int k = n >> j;
        out[31-j] = k & 1;
    }
}

/* Build a 32-bit value from four calls to rand(); rand() may return fewer
 * than 32 random bits per call, so bytes are assembled individually.
 * NOTE(review): not cryptographically secure — apparently for testing only. */
uint32_t rand32() {
    uint32_t x;
    x = rand() & 0xff;
    x |= (rand() & 0xff) << 8;
    x |= (rand() & 0xff) << 16;
    x |= (rand() & 0xff) << 24;
    return x;
}

/* Debug helper: recursively print the binary representation of n
 * (prints nothing at all when n == 0). */
void printbits(uint32_t n) {
    if (n) {
        printbits(n >> 1);
        printf("%d", n & 1);
    }
}

/* Plain (non-MPC) single-block SHA-256 over `numBits` bits of `input`,
 * writing the 32-byte digest to `result`. Inputs longer than 447 bits
 * would need a second block and are rejected (returns -1; 0 on success).
 * NOTE(review): the calloc'd `chunk` buffer is never freed — leaks 64
 * bytes per call. */
int sha256(unsigned char* result, unsigned char* input, int numBits) {
    // SHA-256 function
    uint32_t hA[8] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
            0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 };

    if (numBits > 447) {
        printf("Input too long, aborting!");
        return -1;
    }

    int chars = numBits >> 3;
    unsigned char* chunk = calloc(64, 1); //512 bits
    memcpy(chunk, input, chars);
    chunk[chars] = 0x80;
    //Last 8 chars used for storing length of input without padding, in big-endian.
    //Since we only care for one block, we are safe with just using last 9 bits and 0'ing the rest
    chunk[62] = numBits >> 8;
    chunk[63] = numBits;

    // Message schedule: first 16 words straight from the (big-endian) block.
    uint32_t w[64];
    int i;
    for (i = 0; i < 16; i++) {
        w[i] = (chunk[i * 4] << 24) | (chunk[i * 4 + 1] << 16)
                | (chunk[i * 4 + 2] << 8) | chunk[i * 4 + 3];
    }

    // Remaining 48 words by the standard sigma0/sigma1 expansion.
    uint32_t s0, s1;
    for (i = 16; i < 64; i++) {
        s0 = RIGHTROTATE(w[i - 15], 7) ^ RIGHTROTATE(w[i - 15], 18) ^ (w[i - 15] >> 3);
        s1 = RIGHTROTATE(w[i - 2], 17) ^ RIGHTROTATE(w[i - 2], 19) ^ (w[i - 2] >> 10);
        w[i] = w[i - 16] + s0 + w[i - 7] + s1;
    }

    // 64 compression rounds.
    uint32_t a, b, c, d, e, f, g, h, temp1, temp2, maj;
    a = hA[0];
    b = hA[1];
    c = hA[2];
    d = hA[3];
    e = hA[4];
    f = hA[5];
    g = hA[6];
    h = hA[7];

    for (i = 0; i < 64; i++) {
        s1 = RIGHTROTATE(e,6) ^ RIGHTROTATE(e, 11) ^ RIGHTROTATE(e, 25);
        temp1 = h + s1 + CH(e, f, g) + k[i] + w[i];
        s0 = RIGHTROTATE(a,2) ^ RIGHTROTATE(a, 13) ^ RIGHTROTATE(a, 22);
        // Equivalent form of MAJ(a,b,c) = (a&b)^(a&c)^(b&c).
        maj = (a & (b ^ c)) ^ (b & c);
        temp2 = s0 + maj;

        h = g;
        g = f;
        f = e;
        e = d + temp1;
        d = c;
        c = b;
        b = a;
        a = temp1 + temp2;
    }

    hA[0] += a;
    hA[1] += b;
    hA[2] += c;
    hA[3] += d;
    hA[4] += e;
    hA[5] += f;
    hA[6] += g;
    hA[7] += h;

    // Serialize the eight state words big-endian into the result buffer.
    for (i = 0; i < 8; i++) {
        result[i * 4] = (hA[i] >> 24);
        result[i * 4 + 1] = (hA[i] >> 16);
        result[i * 4 + 2] = (hA[i] >> 8);
        result[i * 4 + 3] = hA[i];
    }
    return 0;
}

/* MPC XOR gate: XOR is linear over the XOR-sharing, so each party just
 * XORs its own shares locally — no randomness, nothing added to the views. */
void mpc_XOR(uint32_t x[3], uint32_t y[3], uint32_t z[3]) {
    z[0] = x[0] ^ y[0];
    z[1] = x[1] ^ y[1];
    z[2] = x[2] ^ y[2];
}

/* MPC AND gate on XOR-shares (the (2,3)-decomposition of ZKBoo).
 * Consumes one byte of per-party randomness (*randCount advanced by 1)
 * and records each party's output share in its view at *countY. */
void mpc_AND(uint32_t x[3], uint32_t y[3], uint32_t z[3],
        unsigned char *randomness[3], int* randCount, View views[3], int* countY) {
    uint32_t r[3] = { getRandom1(randomness[0], *randCount),
            getRandom1(randomness[1], *randCount),
            getRandom1(randomness[2], *randCount)};
    *randCount += 1;
    uint32_t t[3] = { 0 };

    // Party i's share uses its own and the next party's input shares plus
    // the blinding values r[i] ^ r[i+1].
    t[0] = (x[0] & y[1]) ^ (x[1] & y[0]) ^ (x[0] & y[0]) ^ r[0] ^ r[1];
    t[1] = (x[1] & y[2]) ^ (x[2] & y[1]) ^ (x[1] & y[1]) ^ r[1] ^ r[2];
    t[2] = (x[2] & y[0]) ^ (x[0] & y[2]) ^ (x[2] & y[2]) ^ r[2] ^ r[0];
    z[0] = t[0];
    z[1] = t[1];
    z[2] = t[2];
    views[0].y[*countY] = z[0];
    views[1].y[*countY] = z[1];
    views[2].y[*countY] = z[2];
    (*countY)++;
}

/* MPC NOT gate: bitwise complement of every share (local, no randomness).
 * NOTE(review): complementing all three shares flips the reconstructed
 * value because the three shares XOR together an odd number of times. */
void mpc_NEGATE(uint32_t x[3], uint32_t z[3]) {
    z[0] = ~x[0];
    z[1] = ~x[1];
    z[2] = ~x[2];
}

/* MPC 32-bit modular addition of two shared values via a ripple-carry
 * adder: carries c are computed bit-by-bit with AND-gate-style blinding.
 * Consumes 4 randomness bytes (*randCount += 4) and logs each party's
 * carry word in its view. Only carries into bits 1..31 are produced
 * (loop runs i = 0..30); the final carry-out is discarded, giving
 * addition mod 2^32. */
void mpc_ADD(uint32_t x[3], uint32_t y[3], uint32_t z[3],
        unsigned char *randomness[3], int* randCount, View views[3], int* countY) {
    uint32_t c[3] = { 0 };
    uint32_t r[3] = { getRandom32(randomness[0], *randCount),
            getRandom32(randomness[1], *randCount),
            getRandom32(randomness[2], *randCount)};
    *randCount += 4;

    uint8_t a[3], b[3];
    uint8_t t;

    for(int i=0;i<31;i++) {
        // a = bit i of (x ^ c), b = bit i of (y ^ c) for each party.
        a[0]=GETBIT(x[0]^c[0],i);
        a[1]=GETBIT(x[1]^c[1],i);
        a[2]=GETBIT(x[2]^c[2],i);

        b[0]=GETBIT(y[0]^c[0],i);
        b[1]=GETBIT(y[1]^c[1],i);
        b[2]=GETBIT(y[2]^c[2],i);

        // Next carry bit per party, blinded with the shared randomness.
        t = (a[0]&b[1]) ^ (a[1]&b[0]) ^ GETBIT(r[1],i);
        SETBIT(c[0],i+1, t ^ (a[0]&b[0]) ^ GETBIT(c[0],i) ^ GETBIT(r[0],i));

        t = (a[1]&b[2]) ^ (a[2]&b[1]) ^ GETBIT(r[2],i);
        SETBIT(c[1],i+1, t ^ (a[1]&b[1]) ^ GETBIT(c[1],i) ^ GETBIT(r[1],i));

        t = (a[2]&b[0]) ^ (a[0]&b[2]) ^ GETBIT(r[0],i);
        SETBIT(c[2],i+1, t ^ (a[2]&b[2]) ^ GETBIT(c[2],i) ^ GETBIT(r[2],i));
    }

    // Sum shares: z = x ^ y ^ carries.
    z[0]=x[0]^y[0]^c[0];
    z[1]=x[1]^y[1]^c[1];
    z[2]=x[2]^y[2]^c[2];

    views[0].y[*countY] = c[0];
    views[1].y[*countY] = c[1];
    views[2].y[*countY] = c[2];
    *countY += 1;
}

/* Same ripple-carry addition as mpc_ADD, but the second operand y is a
 * public constant (every party adds the same clear value y). */
void mpc_ADDK(uint32_t x[3], uint32_t y, uint32_t z[3],
        unsigned char *randomness[3], int* randCount, View views[3], int* countY) {
    uint32_t c[3] = { 0 };
    uint32_t r[3] = { getRandom32(randomness[0], *randCount),
            getRandom32(randomness[1], *randCount),
            getRandom32(randomness[2], *randCount)};
    *randCount += 4;

    uint8_t a[3], b[3];
    uint8_t t;

    for(int i=0;i<31;i++) {
        a[0]=GETBIT(x[0]^c[0],i);
        a[1]=GETBIT(x[1]^c[1],i);
        a[2]=GETBIT(x[2]^c[2],i);

        // The constant y is XORed with each party's own carry word.
        b[0]=GETBIT(y^c[0],i);
        b[1]=GETBIT(y^c[1],i);
        b[2]=GETBIT(y^c[2],i);

        t = (a[0]&b[1]) ^ (a[1]&b[0]) ^ GETBIT(r[1],i);
        SETBIT(c[0],i+1, t ^ (a[0]&b[0]) ^ GETBIT(c[0],i) ^ GETBIT(r[0],i));

        t = (a[1]&b[2]) ^ (a[2]&b[1]) ^ GETBIT(r[2],i);
        SETBIT(c[1],i+1, t ^ (a[1]&b[1]) ^ GETBIT(c[1],i) ^ GETBIT(r[1],i));

        t = (a[2]&b[0]) ^ (a[0]&b[2]) ^ GETBIT(r[0],i);
        SETBIT(c[2],i+1, t ^ (a[2]&b[2]) ^ GETBIT(c[2],i) ^ GETBIT(r[2],i));
    }

    z[0]=x[0]^y^c[0];
    z[1]=x[1]^y^c[1];
    z[2]=x[2]^y^c[2];

    views[0].y[*countY] = c[0];
    views[1].y[*countY] = c[1];
    views[2].y[*countY] = c[2];
    *countY += 1;
}

/* MPC majority-style gate used by bit decomposition:
 * z = MAJ(x1, x2, x3) computed as ((x1^x3^1) & (x2^x3)) ^ x2 via one
 * mpc_AND. Logs the output shares in the views.
 * NOTE(review): r[] is fetched and *randCount advanced by 1 here although
 * r is never used locally (the consumed random bytes also feed the inner
 * mpc_AND's own draw at the advanced offset) — looks intentional to keep
 * the randomness tape aligned; confirm against the verifier. */
void mpc_GAMMA(uint32_t x1[3], uint32_t x2[3], uint32_t x3[3], uint32_t z[3],
        unsigned char *randomness[3], int* randCount, View views[3], int* countY) {
    uint32_t r[3] = { getRandom1(randomness[0], *randCount),
            getRandom1(randomness[1], *randCount),
            getRandom1(randomness[2], *randCount)};
    *randCount += 1;
    uint32_t t[3] = { 0 };
    uint32_t AC[3] = { 0 };
    uint32_t BC[3] = { 0 };

    AC[0] = x1[0] ^ x3[0] ^ 1;
    AC[1] = x1[1] ^ x3[1] ^ 1;
    AC[2] = x1[2] ^ x3[2] ^ 1;
    BC[0] = x2[0] ^ x3[0];
    BC[1] = x2[1] ^ x3[1];
    BC[2] = x2[2] ^ x3[2];
    mpc_AND(AC,BC,t,randomness,randCount,views,countY);
    z[0] = t[0] ^ x2[0];
    z[1] = t[1] ^ x2[1];
    z[2] = t[2] ^ x2[2];
    views[0].y[*countY] = z[0];
    views[1].y[*countY] = z[1];
    views[2].y[*countY] = z[2];
    (*countY)++;
}

/* Helper gate A for mpc_BETA: re-shares x[1]^x[2] so that only party 1's
 * output share carries the value, blinded by the pairwise randomness.
 * Consumes one randomness byte and logs the output shares. */
void mpc_gateA(uint32_t x[3], uint32_t z[3],
        unsigned char *randomness[3], int* randCount, View views[3], int* countY) {
    uint32_t r[3] = { getRandom1(randomness[0], *randCount),
            getRandom1(randomness[1], *randCount),
            getRandom1(randomness[2], *randCount)};
    *randCount += 1;
    uint32_t t[3] = { 0 };

    t[0] = 0 ^ r[0] ^ r[1];
    t[1] = x[1] ^ x[2] ^ r[1] ^ r[2];
    t[2] = 0 ^ r[2] ^ r[0];
    z[0] = t[0];
    z[1] = t[1];
    z[2] = t[2];
    views[0].y[*countY] = z[0];
    views[1].y[*countY] = z[1];
    views[2].y[*countY] = z[2];
    (*countY)++;
}

/* Helper gate B for mpc_BETA: re-shares x[0]^x[2]^1 so that only party 2's
 * output share carries the value, blinded by the pairwise randomness. */
void mpc_gateB(uint32_t x[3], uint32_t z[3],
        unsigned char *randomness[3], int* randCount, View views[3], int* countY) {
    uint32_t r[3] = { getRandom1(randomness[0], *randCount),
            getRandom1(randomness[1], *randCount),
            getRandom1(randomness[2], *randCount)};
    *randCount += 1;
    uint32_t t[3] = { 0 };

    t[0] = 0 ^ r[0] ^ r[1];
    t[1] = 0 ^ r[1] ^ r[2];
    t[2] = x[0] ^ x[2] ^ 1 ^ r[2] ^ r[0];
    z[0] = t[0];
    z[1] = t[1];
    z[2] = t[2];
    views[0].y[*countY] = z[0];
    views[1].y[*countY] = z[1];
    views[2].y[*countY] = z[2];
    (*countY)++;
}

/* Majority of the three parties' own input shares:
 * beta = MAJ(x1, x2, x3) built from gateA, gateB and one mpc_AND, with
 * x[1] folded back into party 1's output share. Consumes one direct
 * randomness byte plus those of the three sub-gates.
 * NOTE(review): as in mpc_GAMMA, the local r[] draw is unused. */
void mpc_BETA(uint32_t x[3], uint32_t z[3],
        unsigned char *randomness[3], int* randCount, View views[3], int* countY) {
    uint32_t r[3] = { getRandom1(randomness[0], *randCount),
            getRandom1(randomness[1], *randCount),
            getRandom1(randomness[2], *randCount)};
    *randCount += 1;
    uint32_t t[3] = { 0 };
    uint32_t AC[3] = { 0 };
    uint32_t BC[3] = { 0 };

    mpc_gateA(x, BC, randomness, randCount, views, countY);
    mpc_gateB(x, AC, randomness, randCount, views, countY);
    mpc_AND(AC,BC,t,randomness,randCount,views,countY);
    z[0] = t[0];
    z[1] = t[1] ^ x[1];
    z[2] = t[2];
    views[0].y[*countY] = z[0];
    views[1].y[*countY] = z[1];
    views[2].y[*countY] = z[2];
    (*countY)++;
}

/* MPC right-rotate by public amount i: linear, applied share-wise. */
void mpc_RIGHTROTATE(uint32_t x[], int i, uint32_t z[]) {
    z[0] = RIGHTROTATE(x[0], i);
    z[1] = RIGHTROTATE(x[1], i);
    z[2] = RIGHTROTATE(x[2], i);
}

/* MPC logical right-shift by public amount i: linear, applied share-wise. */
void mpc_RIGHTSHIFT(uint32_t x[3], int i, uint32_t z[3]) {
    z[0] = x[0] >> i;
    z[1] = x[1] >> i;
    z[2] = x[2] >> i;
}

/* MPC SHA-256 majority: MAJ(a,b,c) = ((a^b) & (a^c)) ^ a — one AND gate. */
void mpc_MAJ(uint32_t a[], uint32_t b[3], uint32_t c[3], uint32_t z[3],
        unsigned char *randomness[3], int* randCount, View views[3], int* countY) {
    uint32_t t0[3];
    uint32_t t1[3];

    mpc_XOR(a, b, t0);
    mpc_XOR(a, c, t1);
    mpc_AND(t0, t1, z, randomness, randCount, views, countY);
    mpc_XOR(z, a, z);
}

/* MPC SHA-256 choose: CH(e,f,g) = (e & (f^g)) ^ g — one AND gate. */
void mpc_CH(uint32_t e[], uint32_t f[3], uint32_t g[3], uint32_t z[3],
        unsigned char *randomness[3], int* randCount, View views[3], int* countY) {
    uint32_t t0[3];

    //e & (f^g) ^ g
    mpc_XOR(f,g,t0);
    mpc_AND(e,t0,t0, randomness, randCount, views, countY);
    mpc_XOR(t0,g,z);
}

/* MPC bit decomposition: converts an additive sharing over Z_{2^32}
 * (inputs[i][j] holding bit j of party i's arithmetic share, one bit per
 * byte, MSB at index 0) into an XOR sharing of the bits of the sum
 * (written to results, same layout). Processes bits LSB-first (j = 31
 * down to 0) carrying beta/gamma terms forward, one mpc_AND plus one
 * mpc_BETA per bit. Also copies each party's input into its view.
 * Returns 0 on success, -1 if numBytes > 32.
 * NOTE(review): `alpha` and `tmp_res` are declared but never used. */
int mpc_bitdec(unsigned char* results[3], unsigned char* inputs[3], int numBytes,
        unsigned char *randomness[3], View views[3], int* countY, int* randCount) {
    // MPC bit decomposition function that shift from a sharing in Z_q to a sharing in Z_2
    if (numBytes > 32) {
        printf("Input too long, aborting!");
        return -1;
    }

    for (int i = 0; i < 3; i++) {
        memcpy(views[i].x, inputs[i], 32);
    }

    uint32_t X[3];
    uint32_t gamma[3];
    gamma[0] = 0;
    gamma[1] = 0;
    gamma[2] = 0;
    uint32_t beta[3];
    beta[0] = 0;
    beta[1] = 0;
    beta[2] = 0;
    uint32_t alpha[3];
    uint32_t tmp_res[3];

    for (int j=31; j>=0; j--) {
        X[0] = inputs[0][j];
        X[1] = inputs[1][j];
        X[2] = inputs[2][j];

        // Compute [x~]j = [alpha]j ^ [beta]j-1 ^ [gamma]j-1
        results[0][j] = X[0] ^ gamma[0] ^ beta[0];
        results[1][j] = X[1] ^ gamma[1] ^ beta[1];
        results[2][j] = X[2] ^ gamma[2] ^ beta[2];

        // Compute [gamma]j = majority([x]j, [beta]j-1, [gamma]j-1)
        uint32_t AC[3] = { 0 };
        uint32_t BC[3] = { 0 };
        AC[0] = X[0] ^ gamma[0] ^ 1;
        AC[1] = X[1] ^ gamma[1] ^ 1;
        AC[2] = X[2] ^ gamma[2] ^ 1;
        BC[0] = beta[0] ^ gamma[0];
        BC[1] = beta[1] ^ gamma[1];
        BC[2] = beta[2] ^ gamma[2];
        mpc_AND(AC,BC,gamma,randomness,randCount,views,countY);
        gamma[0] = gamma[0] ^ beta[0];
        gamma[1] = gamma[1] ^ beta[1];
        gamma[2] = gamma[2] ^ beta[2];

        // Compute [beta]j = majority(x1j,x2j,x3j)
        mpc_BETA(X, beta, randomness, randCount, views, countY);
    }
    return 0;
}

/* MPC version of single-block SHA-256 over XOR-shared input bytes
 * (inputs[i] = party i's share of the message; numBits <= 447).
 * Mirrors sha256() gate-for-gate, replacing +, CH and MAJ with their MPC
 * counterparts; results[i] receives party i's 32-byte digest share.
 * The initial state hA[] here is the global from shared.h — TODO confirm.
 * Returns 0 on success, -1 if the input is too long. */
int mpc_sha256(unsigned char* results[3], unsigned char* inputs[3], int numBits,
        unsigned char *randomness[3], View views[3], int* countY, int* randCount) {
    // MPC version of SHA256 on Z_2
    if (numBits > 447) {
        printf("Input too long, aborting!");
        return -1;
    }

    int chars = numBits >> 3;
    unsigned char* chunks[3];
    uint32_t w[64][3];

    // Pad each share's block and load the first 16 schedule words.
    for (int i = 0; i < 3; i++) {
        chunks[i] = calloc(64, 1);
        memcpy(chunks[i], inputs[i], chars);
        chunks[i][chars] = 0x80;
        chunks[i][62] = numBits >> 8;
        chunks[i][63] = numBits;
        for (int j = 0; j < 16; j++) {
            w[j][i] = (chunks[i][j * 4] << 24) | (chunks[i][j * 4 + 1] << 16)
                    | (chunks[i][j * 4 + 2] << 8) | chunks[i][j * 4 + 3];
        }
        free(chunks[i]);
    }

    // Message-schedule expansion with MPC additions.
    uint32_t s0[3], s1[3];
    uint32_t t0[3], t1[3];
    for (int j = 16; j < 64; j++) {
        //s0[i] = RIGHTROTATE(w[i][j-15],7) ^ RIGHTROTATE(w[i][j-15],18) ^ (w[i][j-15] >> 3);
        mpc_RIGHTROTATE(w[j-15], 7, t0);
        mpc_RIGHTROTATE(w[j-15], 18, t1);
        mpc_XOR(t0, t1, t0);
        mpc_RIGHTSHIFT(w[j-15], 3, t1);
        mpc_XOR(t0, t1, s0);

        //s1[i] = RIGHTROTATE(w[i][j-2],17) ^ RIGHTROTATE(w[i][j-2],19) ^ (w[i][j-2] >> 10);
        mpc_RIGHTROTATE(w[j-2], 17, t0);
        mpc_RIGHTROTATE(w[j-2], 19, t1);
        mpc_XOR(t0, t1, t0);
        mpc_RIGHTSHIFT(w[j-2], 10, t1);
        mpc_XOR(t0, t1, s1);

        //w[i][j] = w[i][j-16]+s0[i]+w[i][j-7]+s1[i];
        mpc_ADD(w[j-16], s0, t1, randomness, randCount, views, countY);
        mpc_ADD(w[j-7], t1, t1, randomness, randCount, views, countY);
        mpc_ADD(t1, s1, w[j], randomness, randCount, views, countY);
    }

    // Working state: every party starts from the same public IV hA[].
    uint32_t a[3] = { hA[0],hA[0],hA[0] };
    uint32_t b[3] = { hA[1],hA[1],hA[1] };
    uint32_t c[3] = { hA[2],hA[2],hA[2] };
    uint32_t d[3] = { hA[3],hA[3],hA[3] };
    uint32_t e[3] = { hA[4],hA[4],hA[4] };
    uint32_t f[3] = { hA[5],hA[5],hA[5] };
    uint32_t g[3] = { hA[6],hA[6],hA[6] };
    uint32_t h[3] = { hA[7],hA[7],hA[7] };
    uint32_t temp1[3], temp2[3], maj[3];

    // 64 MPC compression rounds.
    for (int i = 0; i < 64; i++) {
        //s1 = RIGHTROTATE(e,6) ^ RIGHTROTATE(e,11) ^ RIGHTROTATE(e,25);
        mpc_RIGHTROTATE(e, 6, t0);
        mpc_RIGHTROTATE(e, 11, t1);
        mpc_XOR(t0, t1, t0);
        mpc_RIGHTROTATE(e, 25, t1);
        mpc_XOR(t0, t1, s1);

        //ch = (e & f) ^ ((~e) & g);
        //temp1 = h + s1 + CH(e,f,g) + k[i]+w[i];

        //t0 = h + s1
        mpc_ADD(h, s1, t0, randomness, randCount, views, countY);
        mpc_CH(e, f, g, t1, randomness, randCount, views, countY);

        //t1 = t0 + t1 (h+s1+ch)
        mpc_ADD(t0, t1, t1, randomness, randCount, views, countY);
        mpc_ADDK(t1, k[i], t1, randomness, randCount, views, countY);
        mpc_ADD(t1, w[i], temp1, randomness, randCount, views, countY);

        //s0 = RIGHTROTATE(a,2) ^ RIGHTROTATE(a,13) ^ RIGHTROTATE(a,22);
        mpc_RIGHTROTATE(a, 2, t0);
        mpc_RIGHTROTATE(a, 13, t1);
        mpc_XOR(t0, t1, t0);
        mpc_RIGHTROTATE(a, 22, t1);
        mpc_XOR(t0, t1, s0);

        mpc_MAJ(a, b, c, maj, randomness, randCount, views, countY);

        //temp2 = s0+maj;
        mpc_ADD(s0, maj, temp2, randomness, randCount, views, countY);

        //e = d+temp1;
        memcpy(h, g, sizeof(uint32_t) * 3);
        memcpy(g, f, sizeof(uint32_t) * 3);
        memcpy(f, e, sizeof(uint32_t) * 3);

        //a = temp1+temp2;
        mpc_ADD(d, temp1, e, randomness, randCount, views, countY);
        memcpy(d, c, sizeof(uint32_t) * 3);
        memcpy(c, b, sizeof(uint32_t) * 3);
        memcpy(b, a, sizeof(uint32_t) * 3);
        mpc_ADD(temp1, temp2, a, randomness, randCount, views, countY);
    }

    // Final state addition H[i] += working state.
    uint32_t hHa[8][3] = { { hA[0],hA[0],hA[0] }, { hA[1],hA[1],hA[1] },
            { hA[2],hA[2],hA[2] }, { hA[3],hA[3],hA[3] },
            { hA[4],hA[4],hA[4] }, { hA[5],hA[5],hA[5] },
            { hA[6],hA[6],hA[6] }, { hA[7],hA[7],hA[7] } };
    mpc_ADD(hHa[0], a, hHa[0], randomness, randCount, views, countY);
    mpc_ADD(hHa[1], b, hHa[1], randomness, randCount, views, countY);
    mpc_ADD(hHa[2], c, hHa[2], randomness, randCount, views, countY);
    mpc_ADD(hHa[3], d, hHa[3], randomness, randCount, views, countY);
    mpc_ADD(hHa[4], e, hHa[4], randomness, randCount, views, countY);
    mpc_ADD(hHa[5], f, hHa[5], randomness, randCount, views, countY);
    mpc_ADD(hHa[6], g, hHa[6], randomness, randCount, views, countY);
    mpc_ADD(hHa[7], h, hHa[7], randomness, randCount, views, countY);

    // Serialize each party's digest share big-endian, byte by byte
    // (assignments truncate the 32-bit shift results to unsigned char).
    for (int i = 0; i < 8; i++) {
        mpc_RIGHTSHIFT(hHa[i], 24, t0);
        results[0][i * 4] = t0[0];
        results[1][i * 4] = t0[1];
        results[2][i * 4] = t0[2];

        mpc_RIGHTSHIFT(hHa[i], 16, t0);
        results[0][i * 4 + 1] = t0[0];
        results[1][i * 4 + 1] = t0[1];
        results[2][i * 4 + 1] = t0[2];

        mpc_RIGHTSHIFT(hHa[i], 8, t0);
        results[0][i * 4 + 2] = t0[0];
        results[1][i * 4 + 2] = t0[1];
        results[2][i * 4 + 2] = t0[2];

        results[0][i * 4 + 3] = hHa[i][0];
        results[1][i * 4 + 3] = hHa[i][1];
        results[2][i * 4 + 3] = hHa[i][2];
    }
    return 0;
}

/* Write `numItems` items of `size` bytes from `data` to `filename`.
 * Returns 0 on success, 1 if the file cannot be opened.
 * NOTE(review): the fwrite result is not checked. */
int writeToFile(char filename[], void* data, int size, int numItems) {
    FILE *file;

    file = fopen(filename, "wb");
    if (!file) {
        printf("Unable to open file!");
        return 1;
    }
    fwrite(data, size, numItems, file);
    fclose(file);
    return 0;
}

/* XOR-secret-share `input` into three shares: the first two are drawn
 * from OpenSSL's CSPRNG, the third is input ^ share0 ^ share1.
 * NOTE(review): RAND_bytes failures are only printed, not propagated. */
int secretShare(unsigned char* input, int numBytes, unsigned char output[3][numBytes]) {
    if(RAND_bytes(output[0], numBytes) != 1) {
        printf("RAND_bytes failed crypto, aborting\n");
    }
    if(RAND_bytes(output[1], numBytes) != 1) {
        printf("RAND_bytes failed crypto, aborting\n");
    }
    for (int j = 0; j < numBytes; j++) {
        output[2][j] = input[j] ^ output[0][j] ^ output[1][j];
    }
    return 0;
}

/* One commitment round: runs mpc_bitdec on the three shares, records the
 * bit-decomposition outputs in the views, repacks the 32 output bits into
 * 4 bytes per party, runs mpc_sha256 over them, records the digest words
 * in the views, and returns the `a` container holding each party's output
 * words (via outputSHA). Also prints debug output, including a direct
 * sha256 of the reconstructed value for cross-checking.
 * NOTE(review): the `rs` parameter and `shamag` are unused; outputs[],
 * hashes[] and result{1,2,3}mem are malloc'd but never freed (leak per
 * round); input_byte{1,2,3}/input_clean rely on SETBIT writing every bit
 * so the uninitialized arrays end up fully defined — confirm SETBIT
 * semantics. */
a commit(int numBytes,unsigned char shares[3][numBytes],
        unsigned char *randomness[3], unsigned char rs[3][4], View views[3]) {
    // Split the shares
    unsigned char* inputs[3];
    inputs[0] = shares[0];
    inputs[1] = shares[1];
    inputs[2] = shares[2];
    unsigned char* outputs[3];
    outputs[0] = malloc(32);
    outputs[1] = malloc(32);
    outputs[2] = malloc(32);

    int* randCount = calloc(1, sizeof(int));
    int* countY = calloc(1, sizeof(int));

    mpc_bitdec(outputs, inputs, numBytes, randomness, views, countY, randCount);

    //Explicitly add y to view
    for (int j = 0; j < 32; ++j) {
        views[0].y[*countY] = outputs[0][j];
        views[1].y[*countY] = outputs[1][j];
        views[2].y[*countY] = outputs[2][j];
        (*countY)++;
    }

    // Print for debug
    printf("Y0 = ");
    for (int j = 0; j <32; j++) {
        printf("%u",outputs[0][j]);
    }
    printf("\n");
    printf("Y1 = ");
    for (int j = 0; j <32; j++) {
        printf("%u",outputs[1][j]);
    }
    printf("\n");
    printf("Y2 = ");
    for (int j = 0; j<32; j++) {
        printf("%u",outputs[2][j]);
    }

    // Reconstruct the bit-decomposed value locally (debug cross-check).
    unsigned char y_rec[32];
    for (int j = 0; j<32; j++) {
        y_rec[j] = (unsigned char)(outputs[0][j]^outputs[1][j]^outputs[2][j]);
    }

    // CONVERT 32 BITS TO 4 BYTES FOR THE MPC_HASH FUNCTION
    // /!\ Wires are assumed to be 1 bit although we used uint32_t as data type in the code
    unsigned char input_clean[4];
    unsigned char input_byte1[4];
    unsigned char input_byte2[4];
    unsigned char input_byte3[4];
    for(int i=0;i<4;i++) {
        // Test O
        SETBIT(input_clean[i], 7, y_rec[i*8]);
        SETBIT(input_clean[i], 6, y_rec[i*8+1]);
        SETBIT(input_clean[i], 5, y_rec[i*8+2]);
        SETBIT(input_clean[i], 4, y_rec[i*8+3]);
        SETBIT(input_clean[i], 3, y_rec[i*8+4]);
        SETBIT(input_clean[i], 2, y_rec[i*8+5]);
        SETBIT(input_clean[i], 1, y_rec[i*8+6]);
        SETBIT(input_clean[i], 0, y_rec[i*8+7]);

        // Player 1
        SETBIT(input_byte1[i], 7, outputs[0][i*8]);
        SETBIT(input_byte1[i], 6, outputs[0][i*8+1]);
        SETBIT(input_byte1[i], 5, outputs[0][i*8+2]);
        SETBIT(input_byte1[i], 4, outputs[0][i*8+3]);
        SETBIT(input_byte1[i], 3, outputs[0][i*8+4]);
        SETBIT(input_byte1[i], 2, outputs[0][i*8+5]);
        SETBIT(input_byte1[i], 1, outputs[0][i*8+6]);
        SETBIT(input_byte1[i], 0, outputs[0][i*8+7]);

        // Player 2
        SETBIT(input_byte2[i], 7, outputs[1][i*8]);
        SETBIT(input_byte2[i], 6, outputs[1][i*8+1]);
        SETBIT(input_byte2[i], 5, outputs[1][i*8+2]);
        SETBIT(input_byte2[i], 4, outputs[1][i*8+3]);
        SETBIT(input_byte2[i], 3, outputs[1][i*8+4]);
        SETBIT(input_byte2[i], 2, outputs[1][i*8+5]);
        SETBIT(input_byte2[i], 1, outputs[1][i*8+6]);
        SETBIT(input_byte2[i], 0, outputs[1][i*8+7]);

        // Player 3
        SETBIT(input_byte3[i], 7, outputs[2][i*8]);
        SETBIT(input_byte3[i], 6, outputs[2][i*8+1]);
        SETBIT(input_byte3[i], 5, outputs[2][i*8+2]);
        SETBIT(input_byte3[i], 4, outputs[2][i*8+3]);
        SETBIT(input_byte3[i], 3, outputs[2][i*8+4]);
        SETBIT(input_byte3[i], 2, outputs[2][i*8+5]);
        SETBIT(input_byte3[i], 1, outputs[2][i*8+6]);
        SETBIT(input_byte3[i], 0, outputs[2][i*8+7]);
    }

    //CHECK HASH WITH DIRECT COMPUTATION
    unsigned char res_shaB[32];
    sha256(res_shaB, input_clean, 32); // This returns the sha256 of the ASCII encoding of each Byte of the secret
    printf("\n");
    printf("Direct sha of the binary representation of the 32b secret\n");
    for(int i = 0; i<8; i++) {
        printf("%02X", (res_shaB[i * 4] << 24) | (res_shaB[i * 4 + 1] << 16)
                | (res_shaB[i * 4 + 2] << 8) | res_shaB[i * 4 + 3]);
    }
    printf("\n");
    printf("count before sha %u\n", *countY);

    // MPC_SHA on the locally reconstructed Bytes
    unsigned char* mpcHASHinput[3];
    mpcHASHinput[0] = input_byte1;
    mpcHASHinput[1] = input_byte2;
    mpcHASHinput[2] = input_byte3;
    unsigned char* hashes[3];
    hashes[0] = malloc(32);
    hashes[1] = malloc(32);
    hashes[2] = malloc(32);

    mpc_sha256(hashes, mpcHASHinput, 4*8, randomness, views, countY, randCount);

    //Explicitly add y to view
    uint32_t shamag[8];
    uint32_t result1[8];
    uint32_t result2[8];
    uint32_t result3[8];
    for(int i = 0; i<8; i++) {
        views[0].y[*countY] = (hashes[0][i * 4] << 24) | (hashes[0][i * 4 + 1] << 16)
                | (hashes[0][i * 4 + 2] << 8) | hashes[0][i * 4 + 3];
        views[1].y[*countY] = (hashes[1][i * 4] << 24) | (hashes[1][i * 4 + 1] << 16)
                | (hashes[1][i * 4 + 2] << 8) | hashes[1][i * 4 + 3];
        views[2].y[*countY] = (hashes[2][i * 4] << 24) | (hashes[2][i * 4 + 1] << 16)
                | (hashes[2][i * 4 + 2] << 8) | hashes[2][i * 4 + 3];
        result1[i] = views[0].y[*countY];
        result2[i] = views[1].y[*countY];
        result3[i] = views[2].y[*countY];
        *countY += 1;
    }

    // Save output in A container
    uint32_t* result1mem = malloc(32);
    outputSHA(views[0], result1mem);
    uint32_t* result2mem = malloc(32);
    outputSHA(views[1], result2mem);
    uint32_t* result3mem = malloc(32);
    outputSHA(views[2], result3mem);

    a a;
    memset(&a, 0, sizeof(a));
    memcpy(a.yp[0], result1mem, 32);
    memcpy(a.yp[1], result2mem, 32);
    memcpy(a.yp[2], result3mem, 32);

    free(countY);
    free(randCount);

    return a;
}

/* Original ZKBoo opening: reveal keys, views and commitment randomness of
 * parties e and e+1. NOTE(review): unused by main() — kept for reference;
 * the //z.x3 line suggests the x-share field was never wired up here. */
z prove(int e, unsigned char keys[3][16], unsigned char rs[3][4], View views[3]) {
    z z;
    // prove a la ZKBoo
    memcpy(z.ke, keys[e], 16);
    memcpy(z.ke1, keys[(e + 1) % 3], 16);
    z.ve = views[e];
    z.ve1 = views[(e + 1) % 3];
    //z.x3 = ;
    memcpy(z.re, rs[e],4);
    memcpy(z.re1, rs[(e + 1) % 3],4);
    return z;
}

/* ZKB++ opening: ship only the (e+1)-th view plus the two seeds; the
 * e-th view is recomputable from its seed. Party 2's input share x is
 * included except when e == 0 (then both revealed shares derive from
 * their seeds). NOTE(review): rs is unused and zz is returned with some
 * fields (and zz.x when e == 0) uninitialized — these uninitialized bytes
 * end up in the proof file; confirm intent. */
zz proveZ(int e, int keys[3], unsigned char rs[3][4], View views[3]) {
    // Prove using ZKB++ style
    zz zz;
    if(e==0){
        zz.ve1 = views[(e+1)%3];
        zz.ke = keys[(e)%3];
        zz.ke1 = keys[(e+1)%3];
    }else if(e==1){
        zz.ve1 = views[(e+1)%3];
        zz.ke = keys[(e)%3];
        zz.ke1 = keys[(e+1)%3];
        memcpy(zz.x, views[2].x, 32);;
    }else{
        zz.ve1 = views[(e+1)%3];
        zz.ke = keys[(e)%3];
        zz.ke1 = keys[(e+1)%3];
        memcpy(zz.x, views[2].x, 32);;
    }
    return zz;
}

/* Prover driver: reads a 32-bit integer, arithmetically shares it,
 * derives per-round keys/randomness/shares from rand() seeds (NOTE: the
 * libc PRNG is used for the tapes — for testing only, not secure), runs
 * NUM_ROUNDS commitment rounds, derives the challenges via Fiat-Shamir
 * (H3), packs the ZKB++ openings and writes the proof to out<N>.bin,
 * printing timing breakdowns and appending them to TimeProve.csv.
 * NOTE(review): several fprintf/printf calls pass int/uint32_t values to
 * %ju (expects uintmax_t) without a cast — undefined behavior per C11;
 * the fgets and fopen("TimeProve.csv") results are unchecked. */
int main(void) {
    setbuf(stdout, NULL);
    srand((unsigned) time(NULL));
    init_EVP();
    openmp_thread_setup();

    // Prime / sanity-check the OpenSSL RNG.
    unsigned char garbage[4];
    if(RAND_bytes(garbage, 4) != 1) {
        printf("RAND_bytes failed crypto, aborting\n");
        return 0;
    }

    printf("Enter the integer to be converted: ");
    char userInput[55]; //55 is max length
    fgets(userInput, sizeof(userInput), stdin);
    printf("Iterations of BITDEC: %d\n", NUM_ROUNDS);
    uint32_t user_input = atoi(userInput);
    int i = 32;   // share length in bytes (one bit per byte)
    printf("input = %d\n", user_input );

    // Create binary vector for input
    unsigned char input[32];
    int2bin(user_input, input);
    printf("Xb ");
    for (int j = 0; j < 32; ++j) {
        printf("%u",input[j]);
    }
    printf("\n");

    clock_t begin = clock(), delta, deltaA;
    unsigned char rs[NUM_ROUNDS][3][4];
    unsigned char keys[NUM_ROUNDS][3][16];
    a as[NUM_ROUNDS];
    memset(&as, 0, sizeof(a)*NUM_ROUNDS);
    b bs[NUM_ROUNDS];
    View localViews[NUM_ROUNDS][3];
    int totalCrypto = 0;

    //Generating keys
    clock_t beginCrypto = clock(), deltaCrypto;
    int k0 = rand(); // seed for testing - set to random for evaluation
    srand(k0);
    int k_seed[NUM_ROUNDS][3];
    for (int j = 0; j < NUM_ROUNDS; ++j) {
        k_seed[j][0] = rand();
        k_seed[j][1] = rand();
        k_seed[j][2] = rand();
    }
    deltaCrypto = clock() - beginCrypto;
    int inMilliCrypto = deltaCrypto * 1000 / CLOCKS_PER_SEC;
    totalCrypto = inMilliCrypto;

    // Expand each per-round, per-party seed into key / commitment
    // randomness / raw share bytes, reproducibly via srand + rand.
    clock_t beginSS = clock(), deltaSS;
    unsigned char shares[NUM_ROUNDS][3][i];
    for (int j = 0; j < NUM_ROUNDS; ++j) {
        printf("k0 %u\n", k_seed[j][0]);
        srand(k_seed[j][0]);
        for(int k=0; k<16; k++){
            keys[j][0][k] = rand() % 256;
        }
        for(int k=0; k<4; k++){
            rs[j][0][k] = rand() % 256;
        }
        for(int k=0; k<i; k++){
            shares[j][0][k] = rand() % 256;
        }
        printf("k1 %u\n", k_seed[j][1]);
        srand(k_seed[j][1]);
        for(int k=0; k<16; k++){
            keys[j][1][k] = rand() % 256;
        }
        for(int k=0; k<4; k++){
            rs[j][1][k] = rand() % 256;
        }
        for(int k=0; k<i; k++){
            shares[j][1][k] = rand() % 256;
        }
        printf("k2 %u\n", k_seed[j][2]);
        srand(k_seed[j][2]);
        for(int k=0; k<16; k++){
            keys[j][2][k] = rand() % 256;
        }
        for(int k=0; k<4; k++){
            rs[j][2][k] = rand() % 256;
        }
        for(int k=0; k<i; k++){
            shares[j][2][k] = rand() % 256;
        }
    }

    printf("\nSharing the secret in Z32");
    //#pragma omp parallel for // Removing parallelisation
    // For every round do :
    for(int k=0; k<NUM_ROUNDS; k++) {
        // Shares 0 and 1 are random bit vectors (reduce bytes mod 2);
        // share 2 is chosen so the three sum to user_input in Z_{2^32}.
        printf("\nX0b ");
        for (int j = 0; j < 32; ++j) {
            shares[k][0][j] = shares[k][0][j] %2;
            printf("%u",shares[k][0][j]);
        }
        // Convert X0b to arithmetic value
        uint32_t inputA0 = bits2int(shares[k][0]);
        printf(" X0d = %u\n", inputA0);

        printf("X1b ");
        for (int j = 0; j < 32; ++j) {
            shares[k][1][j] = shares[k][1][j] %2;
            printf("%u",shares[k][1][j]);
        }
        // Convert X1b to arithmetic value
        uint32_t inputA1 = bits2int(shares[k][1]);
        printf(" X1d = %u\n", inputA1);

        for (int j = 0; j < 32; ++j) {
            shares[k][2][j] = shares[k][2][j] %2;
        }
        // Create third share in Z32 (unsigned wrap-around = subtraction mod 2^32)
        uint32_t inputA2 = user_input - inputA0 - inputA1;
        unsigned char input2[32];
        int2bin(inputA2,input2);
        // Create the share for Player3 by taking the bit representation of X3
        for (int j = 31; j >=0; j--) {
            shares[k][2][j] = input2[j];
        }
        printf("X2b ");
        for (int j = 0; j < 32; ++j) {
            printf("%u",shares[k][2][j]);
        }
        printf(" X2d = %u\n", inputA2);
    }
    deltaSS = clock() - beginSS;
    int inMilli = deltaSS * 1000 / CLOCKS_PER_SEC;
    totalSS = inMilli;

    //Generating randomness
    // 3552 bytes per tape — presumably sized to the circuit's total
    // randomness consumption; TODO confirm against the gate counts.
    clock_t beginRandom = clock(), deltaRandom;
    unsigned char *randomness[NUM_ROUNDS][3];
    //#pragma omp parallel for
    for(int k=0; k<NUM_ROUNDS; k++) {
        for(int j = 0; j<3; j++) {
            randomness[k][j] = malloc(3552*sizeof(unsigned char));
            getAllRandomness(keys[k][j], randomness[k][j]);
        }
    }
    deltaRandom = clock() - beginRandom;
    inMilli = deltaRandom * 1000 / CLOCKS_PER_SEC;
    totalRandom = inMilli;

    //Running MPC-BITDEC
    clock_t beginSha = clock(), deltaSha;
    printf("\nRunning MPC BOOL\n");
    //#pragma omp parallel for
    for(int k=0; k<NUM_ROUNDS; k++) {
        as[k] = commit(i, shares[k], randomness[k], rs[k], localViews[k]);
        for(int j=0; j<3; j++) {
            free(randomness[k][j]);
        }
    }
    deltaSha = clock() - beginSha;
    inMilli = deltaSha * 1000 / CLOCKS_PER_SEC;
    totalSha = inMilli;

    // Generating A — commit to each party's (key, view, randomness).
    clock_t beginHash = clock(), deltaHash;
    //#pragma omp parallel for
    for(int k=0; k<NUM_ROUNDS; k++) {
        unsigned char hash1[SHA256_DIGEST_LENGTH];
        H(keys[k][0], localViews[k][0], rs[k][0], hash1);
        memcpy(as[k].h[0], &hash1, 32);
        H(keys[k][1], localViews[k][1], rs[k][1], hash1);
        memcpy(as[k].h[1], &hash1, 32);
        H(keys[k][2], localViews[k][2], rs[k][2], hash1);
        memcpy(as[k].h[2], &hash1, 32);
    }
    deltaHash = clock() - beginHash;
    inMilli = deltaHash * 1000 / CLOCKS_PER_SEC;
    totalHash += inMilli;
    deltaA = clock() - begin;
    int inMilliA = deltaA * 1000 / CLOCKS_PER_SEC;

    //Generating E — Fiat-Shamir challenges from the public output + commitments.
    clock_t beginE = clock(), deltaE;
    int es[NUM_ROUNDS];
    uint32_t finalHash[8];
    for (int j = 0; j < 8; j++) {
        finalHash[j] = as[0].yp[0][j]^as[0].yp[1][j]^as[0].yp[2][j];
    }
    H3(finalHash, as, NUM_ROUNDS, es);
    deltaE = clock() - beginE;
    int inMilliE = deltaE * 1000 / CLOCKS_PER_SEC;

    // Generating B — keep the unopened party's commitment and output.
    clock_t beginB = clock(), deltaB;
    //#pragma omp parallel for
    for(int k=0; k<NUM_ROUNDS; k++) {
        memcpy(bs[k].h, as[k].h[(es[k]+2)%3], 32);
        memcpy(bs[k].y, as[k].yp[(es[k]+2)%3], 32);
    }
    deltaB = clock() - beginB;
    int inMilliB = deltaB * 1000 / CLOCKS_PER_SEC;

    //Packing ZZ (z container for ZKBpp)
    clock_t beginZ = clock(), deltaZ;
    zz* zzs = malloc(sizeof(zz)*NUM_ROUNDS);
    //#pragma omp parallel for
    for(int i = 0; i<NUM_ROUNDS; i++) {
        zzs[i] = proveZ(es[i],k_seed[i],rs[i], localViews[i]);
    }
    deltaZ = clock() - beginZ;
    int inMilliZ = deltaZ * 1000 / CLOCKS_PER_SEC;

    //Writing to file
    clock_t beginWrite = clock();
    FILE *file;
    char outputFile[30*sizeof(int) + 8];
    sprintf(outputFile, "out%i.bin", NUM_ROUNDS);
    file = fopen(outputFile, "wb");
    if (!file) {
        printf("Unable to open file!");
        return 1;
    }
    fwrite(finalHash,sizeof(uint32_t), 8,file);
    fwrite(es, sizeof(int), NUM_ROUNDS, file);
    fwrite(bs, sizeof(b), NUM_ROUNDS, file);
    fwrite(zzs, sizeof(zz), NUM_ROUNDS, file);
    fclose(file);
    clock_t deltaWrite = clock()-beginWrite;
    free(zzs);
    int inMilliWrite = deltaWrite * 1000 / CLOCKS_PER_SEC;

    delta = clock() - begin;
    inMilli = delta * 1000 / CLOCKS_PER_SEC;

    // Timing report.
    int sumOfParts = 0;
    printf("Generating A: %ju ms\n", (uintmax_t)inMilliA);
    printf(" Generating keys: %ju ms\n", (uintmax_t)totalCrypto);
    sumOfParts += totalCrypto;
    printf(" Generating randomness: %ju ms\n", (uintmax_t)totalRandom);
    sumOfParts += totalRandom;
    printf(" Sharing secrets: %ju ms\n", (uintmax_t)totalSS);
    sumOfParts += totalSS;
    printf(" Running MPC-BOOL: %ju ms\n", (uintmax_t)totalSha);
    sumOfParts += totalSha;
    printf(" Committing: %ju ms\n", (uintmax_t)totalHash);
    sumOfParts += totalHash;
    printf(" *Accounted for*: %ju ms\n", (uintmax_t)sumOfParts);
    printf("Generating E: %ju ms\n", (uintmax_t)inMilliE);
    printf("Packing B: %ju ms\n", (uintmax_t)inMilliB);
    printf("Packing Z: %ju ms\n", (uintmax_t)inMilliZ);
    printf("Writing file: %ju ms\n", (uintmax_t)inMilliWrite);
    printf("Total time: %d ms\n",inMilli);
    printf("\n");
    printf("Proof output to file %s", outputFile);

    // Append timings to CSV.
    // NOTE(review): these %ju conversions receive int/uint32_t arguments
    // without a (uintmax_t) cast — undefined behavior; TODO fix casts.
    FILE *fp;
    fp = fopen("TimeProve.csv", "a+");
    fprintf(fp,"%ju,", user_input);
    fprintf(fp,"%ju,", inMilli);
    fprintf(fp,"%ju,", totalCrypto+totalRandom);
    fprintf(fp,"%ju,", totalSS);
    fprintf(fp,"%ju\n", totalSha+totalHash+inMilliE+inMilliZ+inMilliWrite);
    fclose(fp);

    openmp_thread_cleanup();
    cleanup_EVP();

    return EXIT_SUCCESS;
}
omp_array.c
/******************************************************************************
 * FILE: omp_array.c
 * DESCRIPTION: Array addition - C/C++ Version
 *   This is a simple array addition running with omp
 * AUTHOR: Victor Rodriguez
 * LAST REVISED: 04/06/05
 ******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int main (int argc, char *argv[])
{
    /* The loop counter must be wide enough for 2e9 iterations: the original
     * "int x" overflowed INT_MAX (signed-overflow UB) before reaching sz2G. */
    long int x = 0;
    long int sz2G = 2000000000;

    /* ~16 GiB per array. calloc the two source arrays so the addition reads
     * defined (zero) values — the original read uninitialized malloc memory. */
    long int *myarray_1 = malloc(sizeof(long int) * sz2G);
    long int *myarray_2 = calloc(sz2G, sizeof(long int));
    long int *myarray_3 = calloc(sz2G, sizeof(long int));

    /* The original never checked these huge allocations. */
    if (!myarray_1 || !myarray_2 || !myarray_3) {
        fprintf(stderr, "omp_array: out of memory\n");
        free(myarray_1);
        free(myarray_2);
        free(myarray_3);
        return EXIT_FAILURE;
    }

    /* "parallel for" divides the iteration space between threads; the original
     * plain "parallel" region made EVERY thread execute the full loop
     * redundantly (same results, processes*N work). */
    #pragma omp parallel for shared(myarray_1, myarray_2, myarray_3) private(x)
    for (x = 0; x < sz2G; x++) {
        myarray_1[x] = myarray_2[x] + myarray_3[x];
    }

    free(myarray_1);
    free(myarray_2);
    free(myarray_3);
    return EXIT_SUCCESS;
}
expected_output.c
#include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> #include <polybench.h> #include "adi.h" /** * This version is stamped on May 10, 2016 * * Contact: * Louis-Noel Pouchet <pouchet.ohio-state.edu> * Tomofumi Yuki <tomofumi.yuki.fr> * * Web address: http://polybench.sourceforge.net */ /*adi.c: this file is part of PolyBench/C*/ /*Include polybench common header.*/ /*Include benchmark-specific header.*/ /*Array initialization.*/ static void init_array(int n, double u[1000][1000]) { int i, j; for(i = 0; i < n; i++) for(j = 0; j < n; j++) { u[i][j] = (double) (i + n - j) / n; } } /*DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output.*/ static void print_array(int n, double u[1000][1000]) { int i, j; fprintf(stderr, "==BEGIN DUMP_ARRAYS==\n"); fprintf(stderr, "begin dump: %s", "u"); for(i = 0; i < n; i++) for(j = 0; j < n; j++) { if((i * n + j) % 20 == 0) fprintf(stderr, "\n"); fprintf(stderr, "%0.2lf ", u[i][j]); } fprintf(stderr, "\nend dump: %s\n", "u"); fprintf(stderr, "==END DUMP_ARRAYS==\n"); } /*Main computational kernel. 
The whole function will be timed, including the call and return.*/ /*Based on a Fortran code fragment from Figure 5 of * "Automatic Data and Computation Decomposition on Distributed Memory Parallel Computers" * by Peizong Lee and Zvi Meir Kedem, TOPLAS, 2002 */ static void kernel_adi(int tsteps, int n, double u[1000][1000], double v[1000][1000], double p[1000][1000], double q[1000][1000]) { int t, i, j; double DX, DY, DT; double B1, B2; double mul1, mul2; double a, b, c, d, e, f; DX = 1.0 / (double) n; DY = 1.0 / (double) n; DT = 1.0 / (double) tsteps; B1 = 2.0; B2 = 1.0; mul1 = B1 * DT / (DX * DX); mul2 = B2 * DT / (DY * DY); a = -mul1 / 2.0; b = 1.0 + mul1; c = a; d = -mul2 / 2.0; e = 1.0 + mul2; f = d; /*************** Clava msgError ************** unsolved dependency for arrayAccess u use : RW ****************************************/ for(t = 1; t <= tsteps; t++) { #pragma omp parallel for default(shared) private(i, j) firstprivate(n, a, b, c, d, f, u) for(i = 1; i < n - 1; i++) { v[0][i] = 1.0; p[i][0] = 0.0; q[i][0] = v[0][i]; /*************** Clava msgError ************** unsolved dependency for arrayAccess p use : RWR unsolved dependency for arrayAccess q use : RW ****************************************/ for(j = 1; j < n - 1; j++) { p[i][j] = -c / (a * p[i][j - 1] + b); q[i][j] = (-d * u[j][i - 1] + (1.0 + 2.0 * d) * u[j][i] - f * u[j][i + 1] - a * q[i][j - 1]) / (a * p[i][j - 1] + b); } v[n - 1][i] = 1.0; /*************** Clava msgError ************** unsolved dependency for arrayAccess v use : RW ****************************************/ for(j = n - 2; j >= 1; j--) { v[j][i] = p[i][j] * v[j + 1][i] + q[i][j]; } } #pragma omp parallel for default(shared) private(i, j) firstprivate(n, d, e, f, a, c, v) for(i = 1; i < n - 1; i++) { u[i][0] = 1.0; p[i][0] = 0.0; q[i][0] = u[i][0]; /*************** Clava msgError ************** unsolved dependency for arrayAccess p use : RWR unsolved dependency for arrayAccess q use : RW 
****************************************/ for(j = 1; j < n - 1; j++) { p[i][j] = -f / (d * p[i][j - 1] + e); q[i][j] = (-a * v[i - 1][j] + (1.0 + 2.0 * a) * v[i][j] - c * v[i + 1][j] - d * q[i][j - 1]) / (d * p[i][j - 1] + e); } u[i][n - 1] = 1.0; /*************** Clava msgError ************** unsolved dependency for arrayAccess u use : RW ****************************************/ for(j = n - 2; j >= 1; j--) { u[i][j] = p[i][j] * u[i][j + 1] + q[i][j]; } } } } int main(int argc, char **argv) { /*Retrieve problem size.*/ int n = 1000; int tsteps = 500; /*Variable declaration/allocation.*/ double (*u)[1000][1000]; u = (double (*)[1000][1000]) polybench_alloc_data((1000 + 0) * (1000 + 0), sizeof(double)); ; double (*v)[1000][1000]; v = (double (*)[1000][1000]) polybench_alloc_data((1000 + 0) * (1000 + 0), sizeof(double)); ; double (*p)[1000][1000]; p = (double (*)[1000][1000]) polybench_alloc_data((1000 + 0) * (1000 + 0), sizeof(double)); ; double (*q)[1000][1000]; q = (double (*)[1000][1000]) polybench_alloc_data((1000 + 0) * (1000 + 0), sizeof(double)); ; /*Initialize array(s).*/ init_array(n, *u); /*Start timer.*/ ; /*Run kernel.*/ kernel_adi(tsteps, n, *u, *v, *p, *q); /*Stop and print timer.*/ ; ; /*Prevent dead-code elimination. All live-out data must be printed by the function call in argument.*/ if(argc > 42 && !strcmp(argv[0], "")) print_array(n, *u); /*Be clean.*/ free((void *) u); ; free((void *) v); ; free((void *) p); ; free((void *) q); ; return 0; }
LenSoftMax.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/LenSoftMax.c"
#else

/*
 * Length-masked softmax: for each row t of a 2D input, computes a softmax
 * over only the first len[t] entries and writes 0 to the remaining columns.
 * input: (nframe, dim) tensor; len: 1D tensor of per-row valid lengths.
 */
void THLENN_(LenSoftMax_updateOutput)(
          THLENNState *state,
          THTensor *input,
          THTensor *output,
          THIndexTensor *len)
{
  /* BUG FIX: the original used &&, which raises the error only when BOTH
   * shapes are wrong — a 3D input with a 1D len slipped through.  The error
   * message documents the intended contract, so reject if EITHER is wrong. */
  if ((input->nDimension != 2) || (len->nDimension != 1)) {
    THArgCheck(0, 2, "2D tensor expected for input, 1D tensor expected for len");
  }

  real *input_data, *output_data;
  THIndex_t *len_data;
  ptrdiff_t nframe = input->size[0], dim = input->size[1];
  ptrdiff_t t;

  input = THTensor_(newContiguous)(input);
  THTensor_(resizeAs)(output, input);
  input_data = THTensor_(data)(input);
  output_data = THTensor_(data)(output);
  len_data = THIndexTensor_(data)(len);

  /* Rows are independent: one softmax per row, parallel over rows. */
#pragma omp parallel for private(t)
  for (t = 0; t < nframe; t++)
  {
    real *input_ptr = input_data + t*dim;
    real *output_ptr = output_data + t*dim;

    real inputMax = -THInf;
    accreal sum;

    ptrdiff_t d, ld = (ptrdiff_t)len_data[t];
    /* max-subtraction for numerical stability of exp() */
    for (d = 0; d < ld; d++) {
      if (input_ptr[d] >= inputMax) inputMax = input_ptr[d];
    }

    sum = 0;
    for (d = 0; d < ld; d++) {
      real z = exp(input_ptr[d] - inputMax);
      output_ptr[d] = z;
      sum += z;
    }

    /* entries beyond the valid length get probability 0 */
    for (d = ld; d < dim; d++) {
      output_ptr[d] = 0;
    }

    /* NOTE(review): assumes len_data[t] >= 1; ld == 0 leaves sum == 0 and
     * would divide by zero here — confirm callers guarantee positive lengths. */
    for (d = 0; d < ld; d++) {
      output_ptr[d] *= 1/sum;
    }
  }

  THTensor_(free)(input);
}

/*
 * Gradient of the length-masked softmax:
 *   gradInput = output * (gradOutput - sum(gradOutput * output))
 * over the first len[t] columns of each row; masked columns get gradient 0.
 */
void THLENN_(LenSoftMax_updateGradInput)(
          THLENNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          THTensor *output,
          THIndexTensor *len)
{
  THNN_CHECK_SHAPE(input, gradOutput);
  /* BUG FIX: same && -> || shape-check correction as in updateOutput. */
  if ((output->nDimension != 2) || (len->nDimension != 1)) {
    THError("2D tensor expected for input, 1D tensor expected for len");
  }

  real *gradInput_data, *gradOutput_data, *output_data;
  THIndex_t *len_data;
  ptrdiff_t nframe = output->size[0], dim = output->size[1];
  ptrdiff_t t;

  gradOutput = THTensor_(newContiguous)(gradOutput);
  output = THTensor_(newContiguous)(output);
  THTensor_(resizeAs)(gradInput, output);
  gradInput_data = THTensor_(data)(gradInput);
  output_data = THTensor_(data)(output);
  gradOutput_data = THTensor_(data)(gradOutput);
  len_data = THIndexTensor_(data)(len);

#pragma omp parallel for private(t)
  for (t = 0; t < nframe; t++)
  {
    real *gradInput_ptr = gradInput_data + t*dim;
    real *output_ptr = output_data + t*dim;
    real *gradOutput_ptr = gradOutput_data + t*dim;

    ptrdiff_t d, ld = (ptrdiff_t)len_data[t];
    accreal sum = 0;
    for (d = 0; d < ld; d++)
      sum += (accreal)gradOutput_ptr[d] * output_ptr[d];
    for (d = 0; d < ld; d++)
      gradInput_ptr[d] = output_ptr[d] * (gradOutput_ptr[d] - sum);
    /* masked positions receive zero gradient */
    for (d = ld; d < dim; d++)
      gradInput_ptr[d] = 0;
  }

  THTensor_(free)(gradOutput);
  THTensor_(free)(output);
}

#endif
paradis.h
#include <cassert> #include <iostream> #include <vector> #include <algorithm> #include <chrono> #include <random> #include <sstream> #include <thread> #include <fstream> #include <iomanip> #include <iterator> #include <climits> #include <omp.h> #define FOR(i,a,b) for(int i=a;i<b;i++) #define rep(i,b) FOR(i,0,b) const int MaxThreadNum=224; const long long MaxDataSize=10000000000; const long long MaxDataNum=4294967295; const int MaxKisuu=256; std::vector<int> Dataset; long long Datasize; static const int kRadixBits = 8; static const size_t kInsertSortThreshold = 0; static const int kRadixMask = (1 << kRadixBits) - 1; static const int kRadixBin = 1 << kRadixBits; template<class D> inline int determineDigitBucket(int stage,D num){ return ((num>>(8*stage))&kRadixMask); } template< class _Type> inline void _swap(_Type &a, _Type&b) { _Type temp = b; b = a; a = temp; } void report_num_threads(int level) { #pragma omp single { printf("Level %d: number of threads in the team - %d\n", level, omp_get_num_threads()); } } template<class T> bool compare(const T &x,const T &y){ return x < y; } template <class RandomIt> inline void insert_sort_core_(RandomIt s, RandomIt e) { for (RandomIt i = s + 1; i < e; ++i) { if (compare(*i, *(i - 1))) { RandomIt j; auto tmp = *i; *i = *(i - 1); for (j = i - 1; j > s && compare(tmp, *(j - 1)); --j) { *j = *(j - 1); } *j = tmp; } } } template<int kth_byte,class RandomIt> inline void PARADIS_core(RandomIt s,RandomIt t,RandomIt begin_itr,int processes=1){ long long cnt[MaxKisuu]={0}; long long elenum=distance(s,t); long long start=distance(begin_itr,s); //assert(start>=0);assert(elenum>=0); //step1 //assert(processes>0); long long part=elenum/processes; long long res=elenum%processes; long long localHists[MaxThreadNum][MaxKisuu]; long long gh[MaxKisuu],gt[MaxKisuu],starts[MaxKisuu],ends[MaxKisuu]; long long ph[MaxThreadNum][MaxKisuu]; long long pt[MaxThreadNum][MaxKisuu]; long long SumCi=elenum; long long pfp[processes+1]; int var_p=processes; 
#pragma omp parallel num_threads(processes) { int th=omp_get_thread_num(); #pragma omp for rep(i,kRadixBin){ rep(t,processes)localHists[t][i]=0; } #pragma omp barrier #pragma omp for for(int i=start;i<start+elenum;i++){ int digit=determineDigitBucket(kth_byte,*(begin_itr+i)); localHists[th][digit]++; } #pragma omp barrier #pragma omp for for(int i=0;i<kRadixBin;i++){ for(int j=0;j<processes;j++){ cnt[i]+=localHists[j][i]; } } #pragma omp barrier #pragma omp single { gh[0]=start; gt[0]=gh[0]+cnt[0]; starts[0]=gh[0]; } //step2 #pragma omp single for(int i=1;i<kRadixBin;i++){ //calc ghi gh[i]=gh[i-1]+cnt[i-1]; //calc gti gt[i]=gh[i]+cnt[i]; starts[i]=gh[i]; } #pragma omp barrier //step3 while(SumCi!=0){ #pragma omp for for(int ii=0;ii<processes;ii++){ int pID=omp_get_thread_num(); for(int i=0;i<kRadixBin;i++){ long long part=(long long)(gt[i]-gh[i])/(long long)var_p; long long res=(long long)(gt[i]-gh[i])%(long long)(var_p); if(pID<var_p-1){ ph[pID][i]=part*pID+gh[i]; pt[pID][i]=part*(pID+1LL)+gh[i]; }else{ ph[pID][i]=part*pID+gh[i]; pt[pID][i]=part*(pID+1LL)+gh[i]+res; } } for(int i=0;i<kRadixBin;i++){ long long head=ph[pID][i]; while(head<pt[pID][i]){ auto v=*(begin_itr+head); int k=determineDigitBucket(kth_byte,v); while(k!=i&&ph[pID][k]<pt[pID][k]){ _swap(v,*(begin_itr+(int)ph[pID][k]));ph[pID][k]++; k=determineDigitBucket(kth_byte,v); } if(k==i){ *(begin_itr+head)=*(begin_itr+ph[pID][i]);head++; *(begin_itr+ph[pID][i])=v;ph[pID][i]++; }else{ *(begin_itr+head)=v;head++; } } } }//end of omp permute #pragma omp single { SumCi=0; long long pfpN=kRadixBin/var_p; long long pfpM=kRadixBin%var_p; pfp[0]=0LL; long long pfpMR=0LL; for(long long i=1LL;i<var_p+1LL;i++){ if(pfpMR<pfpM)pfpMR++; pfp[i]=i*pfpN+pfpMR; } } #pragma omp barrier #pragma omp for for(int k=0;k<processes;k++){ for(long long i=pfp[k];i<pfp[k+1];i++){ long long tail=gt[i]; { for(int pID=0;pID<processes;pID++){ long long head=ph[pID][i]; while(head<pt[pID][i]&&head<tail){ int v=*(begin_itr+head);head++; 
if(determineDigitBucket(kth_byte,v)!=i){ while(head<=tail){ tail--; int w=*(begin_itr+tail); if(determineDigitBucket(kth_byte,w)==i){ *(begin_itr+(head-1))=w; *(begin_itr+tail)=v; break; } } } } } } gh[i]=tail; } } #pragma omp barrier #pragma omp single { int prevSumCi=SumCi; SumCi-0; for(int i=0;i<kRadixBin;i++){ SumCi+=(gt[i]-gh[i]); } } #pragma omp barrier }//end of while }//end of omp2 if(kth_byte>0){ #pragma omp parallel num_threads(processes) #pragma omp single { for(int i=0;i<kRadixBin;i++){ int nextStageThreads=1; nextStageThreads=processes*(cnt[i]*(log(cnt[i])/log(kRadixBin))/(elenum*(log(elenum)/log(kRadixBin)))); if(cnt[i]>64LL){ #pragma omp task PARADIS_core<(kth_byte > 0 ? (kth_byte - 1) : 0)>(begin_itr+starts[i],begin_itr+(starts[i]+cnt[i]),begin_itr,std::max(nextStageThreads,1)); }else if(cnt[i]>1){ insert_sort_core_(begin_itr+starts[i],begin_itr+(starts[i]+cnt[i])); //std::sort(begin_itr+starts[i],begin_itr+(starts[i]+cnt[i])); } } #pragma omp taskwait } } } template<class RandomIt> inline void PARADIS(RandomIt s,RandomIt t,int threadNum){ const size_t vsize=sizeof(typename std::iterator_traits<RandomIt>::value_type); PARADIS_core<vsize-1>(s,t,s,threadNum); }
align_stats.c
#include <getopt.h>
#ifdef HAVE_OPENMP
#include <omp.h>
#endif
#include <pthread.h>
#include "gem_tools.h"
#include "align_stats.h"

// Every entry will be set to zero by default
// Maps base characters (upper/lower case) to small codes: N=1, A=2, C=3, G=4, T=5;
// any other byte stays 0 (= invalid base).
static const char base_tab[256]= { ['A']=2, ['C']=3, ['G']=4, ['T']=5, ['N']=1, ['a']=2, ['c']=3, ['g']=4, ['t']=5, ['n']=1};
// Classifies the characters that delimit fields inside a read ID tag.
static const char id_brk_char[256]= { [':']=ID_COLON_CHAR, ['#']=ID_HASH_CHAR, [' ']=ID_SPACE_CHAR, ['/']=ID_SLASH_CHAR, [0]=ID_END_CHAR };

/* Print command-line usage to stream f. */
static void usage(FILE *f)
{
  fputs("usage:\n align_stats\n",f);
  fputs(" -r|--reads <reads file or file pair>\n",f);
  fputs(" -o|--output <output stats file>\n",f);
  fputs(" -d|--insert_dist <output insert size distribution file>\n",f);
  fputs(" -M|--min_insert <minimum insert size> (for pairing of single end files: default=0)\n",f);
  fprintf(f," -m|--max_insert <maximum insert size> (for pairing of single end files: default=%d)\n",DEFAULT_MAX_INSERT);
#ifdef HAVE_OPENMP
  fputs(" -t|--threads <number of threads>\n",f);
#endif
#ifdef HAVE_ZLIB
  fputs(" -z|--gzip (compress output files with gzip\n",f);
  fputs(" -Z|--no=compress (default)\n",f);
#endif
#ifdef HAVE_BZLIB
  fputs(" -j|--bzip2 (compress output files with bzip2\n",f);
#endif
  fputs(" -l|--read_length <untrimmed read length>\n",f);
  fputs(" -V|--variable Variable length reads\n",f);
  fputs(" -p|--paired Paired mapping input file\n",f);
  fputs(" -i|--ignore_id Do not attempt to parse read IDs\n",f);
  fputs(" -w|--mmap mmap input files\n",f);
  fprintf(f," -P|--phage_lambda <identifier for phage lambda> (default='%s')\n",PHAGE_LAMBDA);
  fprintf(f," -X|--phix174 <identifier for phiX174> (default='%s')\n",PHIX174);
  fprintf(f," -L|--max_read_length <maximum valid read length> (default=%u)\n",MAX_READ_LENGTH);
  fprintf(f," -F|--fastq select fastq quality coding %s\n",DEFAULT_QUAL_OFFSET==QUAL_FASTQ?"(default)":"");
  fprintf(f," -S|--solexa select ilumina quality coding %s\n",DEFAULT_QUAL_OFFSET==QUAL_SOLEXA?"(default)":"");
  fprintf(f," -q|--qual_off select quality value offset (default=%d)\n",DEFAULT_QUAL_OFFSET);
  fputs(" -h|help|usage (print this file\n\n",f);
}

/* Store a string option value, warning (and freeing) if it was already set. */
static void set_opt(char *opt,char **opt_p,char *val)
{
  if(*opt_p) {
    fprintf(stderr,"multiple %s options: '%s' overwriting previous definition '%s'\n",opt,val,*opt_p);
    free(*opt_p);
  }
  *opt_p=strdup(val);
}

/* malloc/calloc/realloc wrappers that abort via gt_cond_fatal_error on OOM. */
static void *as_malloc(size_t s)
{
  void *p;
  p=malloc(s);
  gt_cond_fatal_error(!p,MEM_HANDLER);
  return p;
}

static void *as_calloc(size_t n,size_t s)
{
  void *p;
  p=calloc(n,s);
  gt_cond_fatal_error(!p,MEM_HANDLER);
  return p;
}

static void *as_realloc(void *ptr,size_t s)
{
  void *p;
  p=realloc(ptr,s);
  gt_cond_fatal_error(!p,MEM_HANDLER);
  return p;
}

/* Derive default output/dist file names (with compression suffix) from the
 * input file name when they were not given on the command line; "-" maps to
 * NULL (stdout). */
static void as_set_output_files(as_param *param)
{
  char *csuff[3]={"",".gz",".bz2"};
  char *cs;
  if(!(param->output_file && param->dist_file)) {
    switch(param->compress) {
    case GZIP: cs=csuff[1]; break;
    case BZIP2: cs=csuff[2]; break;
    default: cs=csuff[0]; break;
    }
    if(param->input_files[0]) {
      char *root=strdup(param->input_files[0]);
      char *p=strrchr(root,'/');
      if(p) root=p+1;
      p=strchr(root,'.');
      if(p) *p=0;
      /* Strip trailing "_1"/"_2"/"_map" style suffixes so paired inputs share
       * one output root.
       * NOTE(review): if the name has no '.', p is NULL here yet is still used
       * in the p-root comparisons below — looks like a latent bug; confirm. */
      if(param->input_files[1]) {
        if(p-root>2 && (p[-1]=='1' || p[-1]=='2')) {
          if(p[-2]=='.' || p[-2]=='_') p[-2]=0;
          else p[-1]=0;
        } else if(p-root>6 && !strncmp(p-4,"_map",4) && (p[-5]=='1' || p[-5]=='2') && p[-6]=='_') p[-6]=0;
      } else {
        if(p-root>4 && !strncmp(p-4,"_map",4)) p[-4]=0;
      }
      if(!param->output_file) asprintf(&param->output_file,"%s_report.txt%s",root,cs);
      if(!param->dist_file) asprintf(&param->dist_file,"%s_frag_dist.txt%s",root,cs);
    } else {
      if(!param->output_file) asprintf(&param->output_file,"align_stats_report.txt%s",cs);
      if(!param->dist_file) asprintf(&param->dist_file,"align_stats_frag_dist.txt%s",cs);
    }
  }
  if(!strcmp(param->output_file,"-")) param->output_file=0;
  if(!strcmp(param->dist_file,"-")) param->dist_file=0;
}

/* Allocate an id_tag with empty gt_string fields. */
static id_tag *new_id_tag(void)
{
  id_tag *idt;
  idt=as_malloc(sizeof(id_tag));
  idt->instrument_name=gt_string_new(128);
  idt->run=gt_string_new(128);
  idt->flowcell=gt_string_new(128);
  idt->index=gt_string_new(128);
  return idt;
}

/* Reset the string fields of an id_tag for reuse. */
static void clear_id_tag(id_tag *idt)
{
  gt_string_clear(idt->instrument_name);
  gt_string_clear(idt->run);
  gt_string_clear(idt->flowcell);
  gt_string_clear(idt->index);
}

/* Release an id_tag and its string fields. */
static void free_id_tag(id_tag *idt)
{
  gt_string_delete(idt->instrument_name);
  gt_string_delete(idt->run);
  gt_string_delete(idt->flowcell);
  gt_string_delete(idt->index);
  free(idt);
}

/* Parse an Illumina read ID into idt; returns ID_TAG_OK or an ID_TAG_ERROR_*
 * code when lane/tile/x/y fail validation. */
static uint64_t parse_id_tag(gt_string *tag,id_tag *idt)
{
  // @HWUSI-EAS100R:6:73:941:1973#0/1 Old style Casava tag
  // Machine:lane:tile:x:y#multiplex tag/read
  //
  // @EAS139:136:FC706VJ:2:2104:15343:197393 1:Y:18:ATCACG New style (v1.8) tag
  // Machine:run:flowcell:lane:tile:x:y read:filter:flags:multiplex tag
  //
  // The end of the tag (/read for old style or the part after the space for the new style) may have been trimmed
  //
  char type[11],*tg,*p;
  int fields[11],len[11];
  int i=0,ix=0,err=ID_TAG_OK;
  clear_id_tag(idt);
  tg=gt_string_get_string(tag);
  ix=0;
  fields[0]=0;
  register char c;
  /* Split the tag into up to 11 fields at the delimiter classes above. */
  while(tg[ix] && i<11) {
    while(!(c=id_brk_char[(int)tg[ix]])) ix++;
    len[i]=ix+1-fields[i];
    type[i++]=c;
    ix++;
    if(c==ID_END_CHAR || i==11) break;
    if(c==ID_SPACE_CHAR) while((c=id_brk_char[(int)tg[ix]])==ID_SPACE_CHAR) ix++; // Skip multiple white space characters
    fields[i]=ix;
  }
  int j;
  for(j=0;j<i;j++) if(type[j]!=ID_COLON_CHAR) break;
  int sidx;
  /* Old-style tag: machine then lane; new-style: machine, run, flowcell. */
  if((i==6 || i==7) && j==4 && type[j]==ID_HASH_CHAR) {
    gt_string_copy_substr(idt->instrument_name,tag,fields[0],len[0]);
    sidx=1;
  } else {
    gt_string_copy_substr(idt->instrument_name,tag,fields[0],len[0]);
    gt_string_copy_substr(idt->run,tag,fields[1],len[1]);
    gt_string_copy_substr(idt->flowcell,tag,fields[2],len[2]);
    sidx=3;
  }
  idt->lane=(uint32_t)strtoul(tg+fields[sidx],&p,10);
  if(idt->lane<1 || idt->lane>MAX_LANE_ID) {
    err=ID_TAG_ERROR_BAD_LANE;
  }
  if(err==ID_TAG_OK) {
    idt->tile=(uint32_t)strtoul(tg+fields[sidx+1],&p,10);
    if(idt->tile<1) {
      err=ID_TAG_ERROR_BAD_TILE;
    }
  }
  if(err==ID_TAG_OK) {
    idt->x=(uint32_t)strtoul(tg+fields[sidx+2],&p,10);
    if(idt->x<1) {
      err=ID_TAG_ERROR_BAD_COORD;
    }
  }
  if(err==ID_TAG_OK) {
    idt->y=(uint32_t)strtoul(tg+fields[sidx+3],&p,10);
    if(idt->y<1) {
      err=ID_TAG_ERROR_BAD_COORD;
    }
  }
  // if(err==ID_TAG_OK) {
  //   printf("Machine: " PRIgts "\tRun: " PRIgts "\tFC: " PRIgts "\tLane: %d\tTile: %" PRIu32 "\tX,Y: %" PRIu32 ",%" PRIu32 "\n",PRIgts_content(idt->instrument_name),PRIgts_content(idt->run),PRIgts_content(idt->flowcell),idt->lane,idt->tile,idt->x,idt->y);
  // }
  return err;
}

/* Allocate a zeroed as_stats; indel-length tables start at 50 entries. */
static as_stats* as_stats_new(bool paired)
{
  as_stats* stats=as_calloc((size_t)1,sizeof(as_stats));
  stats->max_indel_length=50; // We can expand if necessary
  stats->paired=paired;
  int i,j=(paired==true?2:1);
  for(i=0;i<j;i++) {
    int k;
    for(k=0;k<2;k++) stats->indel_length[i*2+k]=as_calloc(sizeof(uint64_t),stats->max_indel_length+1);
  }
  stats->insert_size=0;
  stats->loc_hash=0;
  return stats;
}

/* Free an as_stats and all of its per-read/per-cycle tables. */
static void as_stats_free(as_stats *stats)
{
  uint64_t i,j=(stats->paired==true?2:1);
  for(i=0;i<j;i++) {
    if(stats->curr_read_store[i]) {
      free(stats->read_length_stats[i]);
      uint64_t k;
      for(k=0;k<2;k++) {
        free(stats->indel_length[i*2+k]);
        free(stats->indel_stats[i*2+k]);
      }
      for(k=0;k<stats->curr_read_store[i];k++)
        free(stats->base_counts_by_cycle[i][k]);
      for(k=i*(MAX_QUAL+1);k<(i+1)*(MAX_QUAL+1);k++) {
        free(stats->mm_stats[k]);
        free(stats->qual_stats[k]);
      }
      free(stats->base_counts_by_cycle[i]);
    }
  }
  if(stats->paired==true) {
    HASH_CLEAR(hh,stats->insert_size);
  }
  free(stats);
}

/* Record an indel event of `size` at read cycle `cycle` into table `ix`,
 * growing the shared indel-length tables if the size exceeds their capacity. */
static void add_indel_stats(as_stats *stats,uint64_t size,uint64_t cycle,uint64_t ix)
{
  stats->indel_stats[ix][cycle]++;
  if(size<=MAX_INDEL_SIZE) {
    if(size>stats->max_indel_length) {
      /* grow by ~20%, capped at MAX_INDEL_SIZE; zero the new tail entries */
      uint64_t nsize=size*1.2;
      if(nsize>MAX_INDEL_SIZE) nsize=MAX_INDEL_SIZE;
      uint64_t i,j=(stats->paired==true?2:1);
      for(i=0;i<j;i++) {
        int k;
        for(k=0;k<2;k++) {
          stats->indel_length[i*2+k]=as_realloc(stats->indel_length[i*2+k],sizeof(uint64_t)*(nsize+1));
          uint64_t sz;
          for(sz=stats->max_indel_length+1;sz<=nsize;sz++) stats->indel_length[i*2+k][sz]=0;
        }
      }
      stats->max_indel_length=nsize;
    }
    stats->indel_length[ix][size]++;
  }
}

/* Find (or create, zero-initialized) the counter entry for insert size x. */
static dist_element *as_find_insert_counter(dist_element **de,int64_t x)
{
  dist_element *new_de;
  HASH_FIND(hh,*de,&x,sizeof(int64_t),new_de);
  if(!new_de) {
    new_de=as_malloc(sizeof(dist_element));
    int i;
    for(i=0;i<4;i++) new_de->ct[i]=0;
    new_de->x=x;
    HASH_ADD(hh,*de,x,sizeof(int64_t),new_de);
  }
  return new_de;
}

/* Bump counter `ix` for insert size x and return the entry. */
static dist_element *as_increase_insert_count(dist_element **de,int ix,int64_t x)
{
  dist_element *new_de=as_find_insert_counter(de,x);
  new_de->ct[ix]++;
  return new_de;
}

#define LH_BIN_SIZE 1024
static pthread_rwlock_t loc_hash_rwlock;

/* Record a (contig, position, insert size, tile) observation in the two-level
 * location hash, used later for duplicate-rate estimation.  Thread-safe via
 * the global rwlock, per-contig rwlock, and per-block mutex.
 * NOTE(review): le is written AFTER the block mutex is released — a concurrent
 * realloc in another thread could move lb->elem under us; confirm intended. */
static void insert_loc(as_stats *stats,uint64_t x,int64_t ins_size,uint32_t tile,gt_string *ctg)
{
  loc_hash *lh;
  unsigned int k=x/LH_BIN_SIZE;    /* block index */
  uint16_t loc=x%LH_BIN_SIZE;      /* offset within block */
  pthread_rwlock_rdlock(&loc_hash_rwlock);
  HASH_FIND_STR(*stats->loc_hash,gt_string_get_string(ctg),lh);
  pthread_rwlock_unlock(&loc_hash_rwlock);
  if(!lh) {
    lh=as_malloc(sizeof(loc_hash));
    lh->ctg=strdup(gt_string_get_string(ctg));
    lh->lblock=0;
    pthread_rwlock_init(&lh->rwlock,NULL);
    pthread_rwlock_wrlock(&loc_hash_rwlock);
    HASH_ADD_KEYPTR(hh,*stats->loc_hash,lh->ctg,(int)strlen(lh->ctg),lh);
    pthread_rwlock_unlock(&loc_hash_rwlock);
  }
  loc_block *lb;
  pthread_rwlock_rdlock(&lh->rwlock);
  HASH_FIND_INT(lh->lblock,&k,lb);
  pthread_rwlock_unlock(&lh->rwlock);
  if(!lb) {
    lb=as_malloc(sizeof(loc_block));
    lb->n_elem=0;
    lb->size=INIT_LB_SIZE;
    lb->x=k;
    lb->elem=as_malloc(lb->size*sizeof(loc_elem));
    pthread_mutex_init(&lb->mutex,NULL);
    pthread_rwlock_wrlock(&lh->rwlock);
    HASH_ADD_INT(lh->lblock,x,lb);
    pthread_rwlock_unlock(&lh->rwlock);
  }
  pthread_mutex_lock(&lb->mutex);
  if(lb->n_elem==lb->size) {
    lb->size*=1.5;
    lb->elem=as_realloc(lb->elem,lb->size*sizeof(loc_elem));
  }
  loc_elem* le=lb->elem+(lb->n_elem++);
  pthread_mutex_unlock(&lb->mutex);
  le->loc=loc;
  le->tile=tile;
  le->dist=ins_size;
}

/* Grow all per-cycle tables of read end `rd` so they can index cycle l. */
static void as_stats_resize(as_stats *stats,uint64_t rd,uint64_t l)
{
  stats->max_read_length[rd]=l;
  if(l>=stats->curr_read_store[rd]) {
    uint64_t i,nlen=l*1.5; // Allocate a bit more space than we need now to avoid un-necessary re-sizing in future
    if(stats->curr_read_store[rd]) {
      stats->read_length_stats[rd]=as_realloc(stats->read_length_stats[rd],nlen*sizeof(uint64_t));
      stats->base_counts_by_cycle[rd]=as_realloc(stats->base_counts_by_cycle[rd],sizeof(void *)*nlen);
      uint64_t j;
      for(j=rd*2;j<rd*2+2;j++) stats->indel_stats[j]=as_realloc(stats->indel_stats[j],sizeof(uint64_t)*nlen);
      for(j=rd*(MAX_QUAL+1);j<(rd+1)*(MAX_QUAL+1);j++) {
        stats->mm_stats[j]=as_realloc(stats->mm_stats[j],sizeof(uint64_t)*nlen);
        stats->qual_stats[j]=as_realloc(stats->qual_stats[j],sizeof(uint64_t)*nlen);
      }
      /* zero only the newly added cycles */
      for(i=stats->curr_read_store[rd];i<nlen;i++) {
        stats->read_length_stats[rd][i]=0;
        stats->base_counts_by_cycle[rd][i]=as_calloc((size_t)(MAX_QUAL+1)*5,sizeof(uint64_t));
        for(j=rd*2;j<rd*2+2;j++) stats->indel_stats[j][i]=0;
        for(j=rd*(MAX_QUAL+1);j<(rd+1)*(MAX_QUAL+1);j++) {
          stats->mm_stats[j][i]=0;
          stats->qual_stats[j][i]=0;
        }
      }
    } else {
      /* first allocation for this read end */
      stats->read_length_stats[rd]=as_calloc((size_t)nlen,sizeof(uint64_t));
      stats->base_counts_by_cycle[rd]=as_malloc(sizeof(void *)*nlen);
      uint64_t j;
      for(j=rd*2;j<rd*2+2;j++) stats->indel_stats[j]=as_calloc((size_t)nlen,sizeof(uint64_t));
      for(j=rd*(MAX_QUAL+1);j<(rd+1)*(MAX_QUAL+1);j++) {
        stats->mm_stats[j]=as_calloc((size_t)nlen,sizeof(uint64_t));
        stats->qual_stats[j]=as_calloc((size_t)nlen,sizeof(uint64_t));
      }
      for(i=0;i<nlen;i++) {
        stats->base_counts_by_cycle[rd][i]=as_calloc((size_t)(MAX_QUAL+1)*5,sizeof(uint64_t));
      }
    }
    stats->curr_read_store[rd]=nlen;
  }
}

/* Accumulate per-cycle quality, mismatch (with transition/transversion and
 * previous-base context) and indel statistics from the first map of al. */
static void get_error_profile(as_stats *stats,gt_alignment *al,uint64_t rd,int qual_offset)
{
  /* mis_type[(read_base<<2)|ref_base]: 0 = same, 1 = transition, 2 = transversion */
  static int mis_type[]={0,2,1,2,2,0,2,1,1,2,0,2,2,1,2,0};
  if(!al->maps->used) return;
  // Get first map only from alignment
  register gt_string* const read = al->read;
  register gt_string* const quals = al->qualities;
  register const bool has_qualities = gt_alignment_has_qualities(al);
  gt_map *map=gt_alignment_get_map(al,0);
  register int quality_misms = 0;
  uint64_t i;
  for(i=0;i<read->length;i++) {
    if(has_qualities) {
      quality_misms = gt_string_get_string(quals)[i]-qual_offset;
      if(quality_misms>MAX_QUAL) quality_misms=MAX_QUAL;
    } else quality_misms=0;
    stats->qual_stats[rd*(MAX_QUAL+1)+quality_misms][i]++;
  }
  GT_MAP_ITERATE(map,map_block) {
    GT_MISMS_ITERATE(map_block,misms) {
      if (has_qualities) {
        quality_misms = gt_string_get_string(quals)[misms->position]-qual_offset;
        if(quality_misms>MAX_QUAL) quality_misms=MAX_QUAL;
        else if(quality_misms<0) quality_misms=0;
      }
      switch (misms->misms_type) {
      case MISMS:
        stats->mm_stats[rd*(MAX_QUAL+1)+quality_misms][misms->position]++;
        int base=base_tab[(int)gt_string_get_string(read)[misms->position]]-2;
        int rbase=base_tab[(int)misms->base]-2;
        if(base>=0 && rbase>=0) { // Get transition/transversion counts
          int type=mis_type[(base<<2)|rbase];
          if(type==1) stats->ts_stats[rd][quality_misms]++;
          else if(type==2) stats->tv_stats[rd][quality_misms]++;
        }
        /* previous-base context: same base as previous cycle or not */
        if(base>=0 && misms->position) {
          int prev_base=base_tab[(int)gt_string_get_string(read)[misms->position-1]]-2;
          if(prev_base>=0) {
            if(base==prev_base) stats->pbc_stats[rd][quality_misms]++;
            else stats->pbn_stats[rd][quality_misms]++;
          }
        }
        break;
      case INS:
        add_indel_stats(stats,misms->size,misms->position,rd);
        break;
      case DEL:
        add_indel_stats(stats,misms->size,misms->position,2+rd);
        break;
      }
    }
  }
}

/* Core per-template statistics collector: read lengths, base/quality yields,
 * mapping uniqueness, split-map counts, insert-size distributions, and
 * duplicate-tracking locations. */
static void as_collect_stats(gt_template* template,as_stats* stats,as_param *param,id_tag *idt)
{
  stats->nreads++;
  uint64_t nrd;
  bool paired_file=false; // Was the input file from a paired mapping
  if(gt_input_generic_parser_attributes_is_paired(param->parser_attr)) {
    if(gt_template_get_num_blocks(template)!=2) {
      gt_fatal_error_msg("Fatal error: Expecting paired reads\n");
    }
    nrd=2;
    if(!param->input_files[1]) paired_file=true;
  } else {
    if(gt_template_get_num_blocks(template)!=1) {
      gt_fatal_error_msg("Fatal error: Expecting unpaired reads\n");
    }
    nrd=1;
  }
  uint64_t j;
  gt_alignment *al[2];
  char *rd[2],*ql[2];
  uint64_t len[2];
  register int qual_offset=param->qual_offset;
  // Get alignments, qualities, reads and lengths for both ends
  for(j=0;j<nrd;j++) {
    al[j]=gt_template_get_block(template,j);
    rd[j]=gt_alignment_get_read(al[j]);
    ql[j]=gt_alignment_get_qualities(al[j]);
    len[j]=strlen(rd[j]);
    // Update yield max_read_length and resize stats arrays if necessary
    if(stats->max_read_length[j]<len[j]) as_stats_resize(stats,j,len[j]);
    stats->read_length_stats[j][len[j]]++;
    uint64_t i,yld=0;
    char *p=rd[j];
    char *q=ql[j];
    uint64_t **bc=stats->base_counts_by_cycle[j];
    for(i=0;i<len[j];i++) {
      int base=base_tab[(int)p[i]]-1;
      int qual=q[i]-qual_offset;
      if(qual<0 || qual>MAX_QUAL || base<0) {
        gt_fatal_error_msg("Illegal base or quality character '%c %c' in read\n",p[i],q[i]);
      }
      if(base) yld++;   /* non-N bases count toward yield */
      bc[i][qual*5+base]++;
    }
    stats->yield[j]+=yld;
  }
  // Filter maps (both single and paired end) to remove maps after first zero strata after the first hit
  uint64_t nmaps[3]={0,0,0};
  uint64_t max_dist[3]={0,0,0};
  bool ambig[2]={false,false};
  // Single end alignments
  for(j=0;j<nrd;j++) {
    uint64_t i,k;
    k=gt_alignment_get_num_counters(al[j]);
    for(i=0;i<k;i++) {
      uint64_t x=gt_alignment_get_counter(al[j],i);
      if(x) {
        nmaps[j]+=x;
        max_dist[j]=i;
      } else if(nmaps[j]) break;
    }
    if(i==k-1 && paired_file==false) ambig[j]=true;
    // Collect error states from first alignment only
    if(nmaps[j]) {
      get_error_profile(stats,al[j],j,qual_offset);
    }
  }
  if(nrd==2) {
    // Paired end alignments
    uint64_t i,k;
    k=gt_template_get_num_counters(template);
    for(i=0;i<k;i++) {
      uint64_t x=gt_template_get_counter(template,i);
      if(x) {
        nmaps[2]+=x;
        max_dist[2]=i;
      } else if(nmaps[2]) break;
    }
    // Now count number of split maps for each end
    uint64_t nsplit[3]={0,0,0};
    for(j=0;j<2;j++) {
      bool flg=false;
      GT_ALIGNMENT_ITERATE(al[j],map) {
        if(gt_map_get_distance(map)<=max_dist[j]) {
          if(gt_map_get_num_blocks(map)>1) nsplit[j]++;
          else flg=true;
        }
      }
      if(nsplit[j]) {
        stats->reads_with_splitmaps[j]++;
        if(flg==false) stats->reads_only_with_splitmaps[j]++;
      }
    }
    // And for paired alignments
    bool flg=false;
    GT_TEMPLATE_ITERATE_MMAP__ATTR_(template,maps,maps_attr) {
      if(maps[0] && maps[1]) {
        if(maps_attr->distance<=max_dist[2]) {
          if(gt_map_get_num_blocks(maps[0])>1 || gt_map_get_num_blocks(maps[1])>1) nsplit[2]++;
          else flg=true;
        }
      }
    }
    if(nsplit[2]) {
      stats->reads_with_splitmaps[2]++;
      if(flg==false) stats->reads_only_with_splitmaps[2]++;
    }
    for(j=0;j<2;j++) {
      if(nmaps[j]) {
        stats->mapped[j]++;
        if(nmaps[j]==1) {
          if(ambig[j]==false) stats->unique[j]++;
          else stats->ambiguous[j]++;
        }
      }
    }
    if(nmaps[2]) {
      stats->paired_mapped++;
      if(nmaps[2]==1 && (nmaps[0]<=gt_alignment_get_num_maps(al[0])) && (nmaps[1]<=gt_alignment_get_num_maps(al[1]))) {
        stats->paired_unique++;
        /* NOTE(review): `maps` here appears to rely on the variable declared
         * by the GT_TEMPLATE_ITERATE_MMAP__ATTR_ macro above — confirm scope. */
        maps=gt_template_get_mmap_array(template,0,NULL);
        if(maps[0] && maps[1]) {
          gt_status gt_err;
          int64_t ins_size=gt_template_get_insert_size(maps,&gt_err,0,0);
          if(gt_err==GT_TEMPLATE_INSERT_SIZE_OK) {
            dist_element* de=as_increase_insert_count(&stats->insert_size,AS_INSERT_TYPE_PAIRED,ins_size);
            if(nmaps[0]>1 || nmaps[1]>1) de->ct[AS_INSERT_TYPE_RECOVERED]++;
            if(nsplit[2]) de->ct[AS_INSERT_TYPE_SPLIT]++;
          }
        }
      }
    }
    // Track insert sizes for all pairs where single end reads are uniquely mapping
    if(nmaps[0]==1 && nmaps[1]==1) {
      gt_status gt_err;
      gt_map *tmaps[2];
      tmaps[0]=gt_alignment_get_map(al[0],0);
      tmaps[1]=gt_alignment_get_map(al[1],0);
      uint64_t xx;
      gt_string *contig;
      int64_t ins_size=gt_template_get_insert_size(tmaps,&gt_err,&xx,&contig);
      if(gt_err==GT_TEMPLATE_INSERT_SIZE_OK) {
        (void)as_increase_insert_count(&stats->insert_size,AS_INSERT_TYPE_ALL_UNIQUE,ins_size);
        stats->paired_type[PAIR_TYPE_DS]++;
        insert_loc(stats,xx,ins_size,idt->tile,contig);
      } else if(gt_err==GT_TEMPLATE_INSERT_SIZE_SAME_STRAND) stats->paired_type[PAIR_TYPE_SS]++;
      else if(gt_err==GT_TEMPLATE_INSERT_SIZE_DIFFERENT_CONTIGS) stats->paired_type[PAIR_TYPE_MM]++;
    }
  } else {
    /* We can still track duplicates for single end reads, although we will find too many */
    gt_map* tmap=gt_alignment_get_map(al[0],0);
    insert_loc(stats,tmap->position,0,idt->tile,tmap->seq_name);
  }
}

/* qsort/hash comparators for the duplicate-rate pass. */
static int cmp_dist_elem(dist_element *a,dist_element *b)
{
  return a->x-b->x;
}

/* Order loc_elems by position, then |insert size|, tile, and signed size. */
static int cmp_loc_elem(const void *s1,const void *s2)
{
  const loc_elem *le1,*le2;
  int x;
  le1=s1;
  le2=s2;
  if(!(x=le1->loc-le2->loc)) {
    if(!(x=abs(le1->dist)-abs(le2->dist))) {
      if(!(x=le1->tile-le2->tile)) {
        x=le1->dist-le2->dist;
      }
    }
  }
  return x;
}

/* Thread worker: scans the location hash, sorting each block and counting
 * runs of identical (position, insert size) observations as duplicates. */
static void *as_calc_duplicate_rate(void *ss)
{
  as_param* param=ss;
  as_stats* stats=param->stats[0];
  loc_hash* lh=*(stats->loc_hash);
  uint64_t (*dup_cnt)[DUP_LIMIT+1]=stats->duplicate_counts;
  uint64_t dcounts[2][DUP_LIMIT+1];
  uint64_t tot=0;
  int i,j;
  for(i=0;i<5;i++) for(j=0;j<=DUP_LIMIT;j++) dup_cnt[i][j]=0;
  for(;lh;lh=lh->hh.next) {
    loc_block *lb;
    for(lb=lh->lblock;lb;lb=lb->hh.next) {
      qsort(lb->elem,lb->n_elem,sizeof(loc_elem),cmp_loc_elem);
      u_int16_t tile,loc;
      int16_t dst=0;
      tile=loc=0;
      int k,k1,xx;
      k=k1=xx=0;
      uint64_t kk[4]={0,0,0,0};
      tot+=lb->n_elem;
      loc_elem* le=lb->elem;
      for(i=0;i<(int)lb->n_elem;i++,le++) {
        if(le->loc!=loc || abs(le->dist)!=abs(dst)) {
          if(k) {
            if(k>DUP_LIMIT) k=DUP_LIMIT+1;
            else if(k>1) {
              assert(xx<=DUP_LIMIT);
              dcounts[0][xx]=kk[0];
              dcounts[1][xx++]=kk[1];
for(k1=0;k1<4;k1++) kk[k1]=0;
              // Combine within-tile (kk[0..1]) and between-tile (kk[2..3])
              // pair counts over the per-tile buckets of this run
              for(k1=0;k1<xx;k1++) {
                int k2;
                for(k2=0;k2<2;k2++) {
                  int k3=dcounts[k2][k1];
                  if(k3>1) kk[0]+=k3*(k3-1);
                }
                kk[1]+=dcounts[0][k1]*dcounts[1][k1];
                for(k2=0;k2<k1;k2++) {
                  kk[2]+=dcounts[0][k1]*dcounts[0][k2]+dcounts[1][k1]*dcounts[1][k2];
                  kk[3]+=dcounts[0][k1]*dcounts[1][k2]+dcounts[1][k1]*dcounts[0][k2];
                }
              }
              kk[0]>>=1; // ordered pairs counted twice above
              for(k1=0;k1<4;k1++) dup_cnt[k1+1][k-1]+=kk[k1];
              xx=0;
            }
            dup_cnt[0][k-1]++; // frequency of runs of size k
          }
          // Start a new run at this element
          k=1;
          xx=0;
          tile=le->tile;loc=le->loc;
          dst=le->dist;
          k1=(dst<0)?0:1; // strand bucket by sign of dist
          kk[k1]=1;
          kk[k1^1]=0;
        } else {
          k++;
          if(le->tile!=tile) {
            // Same loc/|dist| but new tile: bank the previous tile's counts
            if(xx<DUP_LIMIT) {
              dcounts[0][xx]=kk[0];
              dcounts[1][xx++]=kk[1];; // NOTE(review): stray ';' - harmless empty statement
              tile=le->tile;
              dst=le->dist;
              k1=(dst<0)?0:1;
              kk[k1]=1;
              kk[k1^1]=0;
            }
          } else {
            if(le->dist!=dst) {
              k1=1;
              dst=le->dist;
            }
            kk[k1]++;
          }
        }
      }
      // Flush the final run of the block (same logic as the in-loop flush)
      if(k) {
        if(k>DUP_LIMIT) k=DUP_LIMIT+1;
        else if(k>1) {
          assert(xx<=DUP_LIMIT);
          dcounts[0][xx]=kk[0];
          dcounts[1][xx++]=kk[1];
          for(k1=0;k1<4;k1++) kk[k1]=0;
          for(k1=0;k1<xx;k1++) {
            int k2;
            for(k2=0;k2<2;k2++) {
              int k3=dcounts[k2][k1];
              if(k3>1) kk[0]+=k3*(k3-1);
            }
            kk[1]+=dcounts[0][k1]*dcounts[1][k1];
            for(k2=0;k2<k1;k2++) {
              kk[2]+=dcounts[0][k1]*dcounts[0][k2]+dcounts[1][k1]*dcounts[1][k2];
              kk[3]+=dcounts[0][k1]*dcounts[1][k2]+dcounts[1][k1]*dcounts[0][k2];
            }
          }
          kk[0]>>=1;
          for(k1=0;k1<4;k1++) dup_cnt[k1+1][k-1]+=kk[k1];
          xx=0;
        }
        dup_cnt[0][k-1]++;
      }
    }
  }
  // Summaries over the count histograms
  double z1,z2,z3,z4,z5,z6;
  z1=z2=z3=z4=z5=z6=0.0;
  //int k=0;
  for(i=0;i<=DUP_LIMIT;i++) {
    z1+=(double)dup_cnt[0][i];
    z2+=(double)dup_cnt[0][i]*(i+1);
    z3+=(double)dup_cnt[1][i];
    z4+=(double)dup_cnt[2][i];
    z5+=(double)dup_cnt[3][i];
    z6+=(double)dup_cnt[4][i];
    //if(dup_cnt[0][i]) k=i;
  }
  if(z2 && (z3+z4+z5+z6)) {
    double z,z7,z8,z9;
    z=1.0-z1/z2; // raw duplicate fraction
    // Optical-duplicate correction from within/between tile pair ratios
    z1=(z3*z6>0.0)?(z3*(z5+z6)-z5*(z3+z4))/(z3*z6):1.0;
    z2=z3*z1/(z3+z4+z5+z6);
    z7=1.0-(1.0-z2)*z;
    // Fixed-point iteration for library size estimate; converges when the
    // update moves less than 1e-2 (capped at 10000 iterations)
    z8=tot*1000.0;
    z9=z8;
    for(i=0;i<10000;i++) {
      z8=tot*z7/(1.0-exp(log(1.0-1.0/z8)*tot));
      if(fabs(z8-z9)<1.0e-2) break;
      z9=z8;
    }
    stats->duplicate_rate[0]=z;
    stats->duplicate_rate[1]=z2;
    stats->duplicate_reads_used=tot;
    stats->unique_fragment_estimate=(uint64_t)(z8+.5);
  } else {
    stats->duplicate_rate[0]=stats->duplicate_rate[1]=0.0;
    stats->duplicate_reads_used=stats->unique_fragment_estimate=0;
  }
  return 0;
}

/*
 * Thread entry point (pthread): fold the per-thread stats st[1..nt-1] into
 * st[0], resizing st[0]'s arrays as needed, then free the merged-in stats
 * and sort the combined insert-size distribution.  Always returns 0.
 */
static void *as_merge_stats(void *ss)
{
  uint64_t i,j,k,k1,nr,len[2],ins_size;
  as_param* param=ss;
  as_stats** st=param->stats;
  uint64_t nt=param->num_threads;
  bool paired=gt_input_generic_parser_attributes_is_paired(param->parser_attr);
  nr=paired?2:1;
  // Grow st[0]'s per-read arrays to the maximum read length over all threads
  for(j=0;j<nr;j++) {
    len[j]=st[0]->max_read_length[j];
    for(i=1;i<nt;i++) {
      if(st[i]->max_read_length[j]>len[j]) len[j]=st[i]->max_read_length[j];
    }
    if(len[j]>st[0]->max_read_length[j]) as_stats_resize(st[0],j,len[j]);
  }
  // Same for the indel-length histograms
  ins_size=st[0]->max_indel_length;
  for(i=1;i<nt;i++) if(st[i]->max_indel_length>ins_size) ins_size=st[i]->max_indel_length;
  if(ins_size>st[0]->max_indel_length) {
    for(j=0;j<nr;j++) {
      int k;
      for(k=0;k<2;k++) {
        st[0]->indel_length[j*2+k]=as_realloc(st[0]->indel_length[j*2+k],sizeof(uint64_t)*(ins_size+1));
        uint64_t sz;
        for(sz=st[0]->max_indel_length+1;sz<=ins_size;sz++) st[0]->indel_length[j*2+k][sz]=0;
      }
    }
  }
  // Accumulate every counter from each worker into st[0]
  for(i=1;i<nt;i++) {
    st[0]->nreads+=st[i]->nreads;
    for(j=0;j<nr;j++) {
      st[0]->yield[j]+=st[i]->yield[j];
      st[0]->mapped[j]+=st[i]->mapped[j];
      st[0]->unique[j]+=st[i]->unique[j];
      st[0]->ambiguous[j]+=st[i]->ambiguous[j];
      for(k=0;k<=MAX_QUAL;k++) {
        st[0]->ts_stats[j][k]+=st[i]->ts_stats[j][k];
        st[0]->tv_stats[j][k]+=st[i]->tv_stats[j][k];
        st[0]->pbc_stats[j][k]+=st[i]->pbc_stats[j][k];
        st[0]->pbn_stats[j][k]+=st[i]->pbn_stats[j][k];
      }
      len[j]=st[i]->max_read_length[j];
      uint64_t **bc0=st[0]->base_counts_by_cycle[j];
      uint64_t **bc1=st[i]->base_counts_by_cycle[j];
      if(st[i]->curr_read_store[j]) {
        if(st[i]->curr_read_store[j]>st[0]->curr_read_store[j]) as_stats_resize(st[0],j,st[i]->curr_read_store[j]);
        for(k=0;k<=len[j];k++) {
          st[0]->read_length_stats[j][k]+=st[i]->read_length_stats[j][k];
          for(k1=0;k1<5*(MAX_QUAL+1);k1++) bc0[k][k1]+=bc1[k][k1];
        }
        for(k1=j*(MAX_QUAL+1);k1<(j+1)*(MAX_QUAL+1);k1++) {
          for(k=0;k<len[j];k++) {
st[0]->mm_stats[k1][k]+=st[i]->mm_stats[k1][k];
            st[0]->qual_stats[k1][k]+=st[i]->qual_stats[k1][k];
          }
        }
        for(k1=j*2;k1<j*2+2;k1++) {
          for(k=0;k<len[j];k++) st[0]->indel_stats[k1][k]+=st[i]->indel_stats[k1][k];
          for(k=0;k<=st[i]->max_indel_length;k++) st[0]->indel_length[k1][k]+=st[i]->indel_length[k1][k];
        }
      }
    }
    for(j=0;j<3;j++) {
      st[0]->paired_type[j]+=st[i]->paired_type[j];
      st[0]->bis_stats[j]+=st[i]->bis_stats[j];
      st[0]->reads_with_splitmaps[j]+=st[i]->reads_with_splitmaps[j];
      st[0]->reads_only_with_splitmaps[j]+=st[i]->reads_only_with_splitmaps[j];
    }
    if(paired) {
      st[0]->paired_mapped+=st[i]->paired_mapped;
      st[0]->paired_unique+=st[i]->paired_unique;
      // Merge the insert-size hash by key (de->x)
      dist_element *de,*de1;
      for(de=st[i]->insert_size;de!=NULL;de=de->hh.next) {
        de1=as_find_insert_counter(&st[0]->insert_size,de->x);
        for(j=0;j<4;j++) de1->ct[j]+=de->ct[j];
      }
    }
    as_stats_free(st[i]); // worker stats no longer needed once merged
  }
  HASH_SORT(st[0]->insert_size,cmp_dist_elem);
  return 0;
}

/*
 * Print the "Yield summary" section to 'f': read counts, read-length
 * ranges, PF yield, trimmed bases and no-call fractions, with separate
 * layouts for paired/single-end and fixed/variable read length.
 */
static void as_print_yield_summary(FILE *f,as_param *param)
{
  bool paired=gt_input_generic_parser_attributes_is_paired(param->parser_attr);
  as_stats* st=param->stats[0];
  uint64_t trimmed[2]={0,0},yield[2]={0,0},min_rl[2],i,j,k;
  fputs("Yield summary\n\n",f);
  j=paired?2:1;
  for(i=0;i<j;i++) {
    uint64_t l=st->max_read_length[i];
    // min_rl = smallest observed read length; trimmed = bases lost vs max
    for(k=0;k<=l;k++) if(st->read_length_stats[i][k]) break;
    min_rl[i]=k;
    for(;k<=l;k++) {
      uint64_t x=st->read_length_stats[i][k];
      trimmed[i]+=(l-k)*x;
      yield[i]+=k*x;
    }
  }
  if(paired) {
    fprintf(f,"Paired end reads. No. pairs =\t%" PRIu64 "\n",st->nreads);
    if(!param->variable_read_length) {
      fprintf(f,"Read lengths:\tRead 1 =\t%" PRIu64 "\tRead 2 =\t%" PRIu64 "\n",st->max_read_length[0],st->max_read_length[1]);
      fprintf(f,"Yield PF:\tRead 1 = \t%" PRIu64 "\tRead 2 = \t%" PRIu64 "\tTotal = \t%" PRIu64 "\n",yield[0]+trimmed[0],yield[1]+trimmed[1],yield[0]+yield[1]+trimmed[0]+trimmed[1]);
      fprintf(f,"Bases trimmed:\tRead 1 = \t%" PRIu64 "\t(%.2f%%)\tRead 2 = \t%" PRIu64 "\t(%.2f%%)\tTotal = \t%" PRIu64 "\t(%.2f%%)\n",
       trimmed[0],100.0*(double)trimmed[0]/(double)(yield[0]+trimmed[0]),
       trimmed[1],100.0*(double)trimmed[1]/(double)(yield[1]+trimmed[1]),
       trimmed[0]+trimmed[1],100.0*(double)(trimmed[0]+trimmed[1])/(double)(yield[0]+yield[1]+trimmed[0]+trimmed[1]));
    } else {
      fprintf(f,"Read lengths:\tRead 1 =\t%" PRIu64 " - %" PRIu64 "\tRead 2 =\t%" PRIu64 " - %" PRIu64 "\n",min_rl[0],st->max_read_length[0],min_rl[1],st->max_read_length[1]);
      fprintf(f,"Yield PF:\tRead 1 =\t%" PRIu64 "\tRead 2 =\t%" PRIu64 "\tTotal =\t%" PRIu64 "\n",yield[0],yield[1],yield[0]+yield[1]);
    }
    fprintf(f,"No calls:\tRead 1 =\t%" PRIu64 "\t(%.2f%%)\tRead 2 =\t%" PRIu64 "\t(%.2f%%)\tTotal =\t%" PRIu64 "\t(%.2f%%)\n",
     yield[0]-st->yield[0],100.0*(double)(yield[0]-st->yield[0])/(double)yield[0],
     yield[1]-st->yield[1],100.0*(double)(yield[1]-st->yield[1])/(double)yield[1],
     yield[0]+yield[1]-st->yield[0]-st->yield[1],100.0*(double)(yield[0]+yield[1]-st->yield[0]-st->yield[1])/(double)(yield[0]+yield[1]));
    fprintf(f,"%s yield:\tRead 1 =\t%" PRIu64 "\t(%.2f%%)\tRead 2 =\t%" PRIu64 "\t(%.2f%%)\tTotal =\t%" PRIu64 "\t(%.2f%%)\n",param->variable_read_length?"Clean":"Trimmed",
     st->yield[0],100.0*(double)st->yield[0]/(double)(yield[0]+trimmed[0]),
     st->yield[1],100.0*(double)st->yield[1]/(double)(yield[1]+trimmed[1]),
     st->yield[0]+st->yield[1],100.0*(double)(st->yield[0]+st->yield[1])/(double)(yield[0]+yield[1]+trimmed[0]+trimmed[1]));
  } else {
    fprintf(f,"Single end reads. No. reads =\t%" PRIu64 "\n",st->nreads);
    if(!param->variable_read_length) {
      fprintf(f,"Read length:\t%" PRIu64 "\n",st->max_read_length[0]);
      fprintf(f,"Yield PF:\t%" PRIu64 "\n",yield[0]+trimmed[0]);
      fprintf(f,"Bases trimmed:\t=\t%" PRIu64 "\t(%.2f%%)\n",
       trimmed[0],100.0*(double)trimmed[0]/(double)(yield[0]+trimmed[0]));
    } else {
      fprintf(f,"Read length:\t%" PRIu64 " - %" PRIu64 "\n",min_rl[0],st->max_read_length[0]);
      fprintf(f,"Yield PF:\t%" PRIu64 "\n",yield[0]);
    }
    fprintf(f,"No calls =\t%" PRIu64 "\t(%.2f%%)\n",yield[0]-st->yield[0],100.0*(double)(yield[0]-st->yield[0])/(double)yield[0]);
    fprintf(f,"%s yield:\t%" PRIu64 "\t(%.2f%%)\n",param->variable_read_length?"Clean":"Trimmed",st->yield[0],100.0*(double)st->yield[0]/(double)(yield[0]+trimmed[0]));
  }
}

/*
 * Print single-end and (when applicable) paired-end mapping summaries:
 * unique/multiple/unmapped/ambiguous counts, split-map counts, pair strand
 * statistics and insert-size quartiles.
 */
static void as_print_mapping_summary(FILE *f,as_param *param)
{
  bool paired=gt_input_generic_parser_attributes_is_paired(param->parser_attr);
  as_stats *st=param->stats[0];
  bool paired_file=false; // Was the input file from a paired mapping
  if(paired==true && !param->input_files[1]) paired_file=true;
  uint64_t counts[4]={0,0,0,0};
  dist_element *de;
  int j;
  // Totals per insert-size category (paired/all/recovered/split)
  for(de=st->insert_size;de;de=de->hh.next) {
    for(j=0;j<4;j++) counts[j]+=de->ct[j];
  }
  double zcounts[4];
  for(j=0;j<4;j++) zcounts[j]=(double)counts[j];
  fputs("\nSingle end mapping summary\n\n",f);
  double z=(double)st->nreads;
  if(paired==true) {
    fprintf(f,"Uniquely mapping reads:\tRead 1 =\t%" PRIu64 "\t(%g%%)\tRead 2 =\t%" PRIu64 "\t(%g%%)\tTotal =\t%" PRIu64 "\t(%g%%)\n",
     st->unique[0],100.0*(double)st->unique[0]/z,
     st->unique[1],100.0*(double)st->unique[1]/z,
     st->unique[0]+st->unique[1],100.0*(double)(st->unique[0]+st->unique[1])/(z+z));
    uint64_t mult[3];
    mult[0]=st->mapped[0]-st->unique[0]-st->ambiguous[0];
    mult[1]=st->mapped[1]-st->unique[1]-st->ambiguous[1];
    fprintf(f,"Multiply mapping reads:\tRead 1 =\t%" PRIu64 "\t(%g%%)\tRead 2 =\t%" PRIu64 "\t(%g%%)\tTotal =\t%" PRIu64 "\t(%g%%)\n",
     mult[0],100.0*(double)mult[0]/z,
     mult[1],100.0*(double)mult[1]/z,
mult[0]+mult[1],100.0*(double)(mult[0]+mult[1])/(z+z));
    uint64_t unmap[3];
    unmap[0]=st->nreads-st->mapped[0];
    unmap[1]=st->nreads-st->mapped[1];
    fprintf(f,"Unmapped reads:\tRead 1 =\t%" PRIu64 "\t(%g%%)\tRead 2 =\t%" PRIu64 "\t(%g%%)\tTotal =\t%" PRIu64 "\t(%g%%)\n",
     unmap[0],100.0*(double)unmap[0]/z,
     unmap[1],100.0*(double)unmap[1]/z,
     unmap[0]+unmap[1],100.0*(double)(unmap[0]+unmap[1])/(z+z));
    // Ambiguity is only meaningful when the pairing was synthesized here
    if(paired_file==false) {
      fprintf(f,"Ambiguous reads:\tRead 1 =\t%" PRIu64 "\t(%g%%)\tRead 2 =\t%" PRIu64 "\t(%g%%)\tTotal =\t%" PRIu64 "\t(%g%%)\n\n",
       st->ambiguous[0],100.0*(double)st->ambiguous[0]/z,
       st->ambiguous[1],100.0*(double)st->ambiguous[1]/z,
       st->ambiguous[0]+st->ambiguous[1],100.0*(double)(st->ambiguous[0]+st->ambiguous[1])/(z+z));
    }
    fprintf(f,"Reads with splitmaps:\tRead 1 =\t%" PRIu64 "\t(%g%%)\tRead 2 =\t%" PRIu64 "\t(%g%%)\tTotal =\t%" PRIu64 "\t(%g%%)\n",
     st->reads_with_splitmaps[0],100.0*(double)st->reads_with_splitmaps[0]/z,
     st->reads_with_splitmaps[1],100.0*(double)st->reads_with_splitmaps[1]/z,
     st->reads_with_splitmaps[0]+st->reads_with_splitmaps[1],100.0*(double)(st->reads_with_splitmaps[0]+st->reads_with_splitmaps[1])/(z+z));
    fprintf(f,"Reads with only splitmaps:\tRead 1 =\t%" PRIu64 "\t(%g%%)\tRead 2 =\t%" PRIu64 "\t(%g%%)\tTotal =\t%" PRIu64 "\t(%g%%)\n",
     st->reads_only_with_splitmaps[0],100.0*(double)st->reads_only_with_splitmaps[0]/z,
     st->reads_only_with_splitmaps[1],100.0*(double)st->reads_only_with_splitmaps[1]/z,
     st->reads_only_with_splitmaps[0]+st->reads_only_with_splitmaps[1],100.0*(double)(st->reads_only_with_splitmaps[0]+st->reads_only_with_splitmaps[1])/(z+z));
    fputs("\nPaired end mapping summary\n\n",f);
    fprintf(f,"Uniquely mapping read pairs:\t%" PRIu64 "\t(%g%%)\n",st->paired_unique,100.0*(double)st->paired_unique/z);
    mult[2]=st->paired_mapped-st->paired_unique;
    fprintf(f,"Multiply mapping reads:\t%" PRIu64 "\t(%g%%)\n",mult[2],100.0*(double)mult[2]/z);
    unmap[2]=st->nreads-st->paired_mapped;
    fprintf(f,"Unmapped read pairs:\t%" PRIu64 "\t(%g%%)\n",unmap[2],100.0*(double)unmap[2]/z);
    fprintf(f,"Read pairs with splitmaps:\t%" PRIu64 "\t(%g%%)\n",st->reads_with_splitmaps[2],100.0*(double)st->reads_with_splitmaps[2]/z);
    fprintf(f,"Read pairs with only splitmaps:\t%" PRIu64 "\t(%g%%)\n",st->reads_only_with_splitmaps[2],100.0*(double)st->reads_only_with_splitmaps[2]/z);
    fputs("\nPair statistics (uniquely mapping read pairs only)\n\n",f);
    // Walk the sorted insert-size distribution once, picking off the
    // 25/50/75% quantiles (Q[0..2]) for each of the 4 categories
    uint64_t cnt[4]={0,0,0,0};
    double lim[3]={.25,.5,.75};
    int state[4]={0,0,0,0};
    int64_t Q[3][4]={{0,0,0,0},{0,0,0,0},{0,0,0,0}};
    dist_element *de;
    int ct=0;
    for(de=st->insert_size;de && ct<4;de=de->hh.next) {
      int i;
      for(i=0;i<4;i++) {
        if(state[i]<3) {
          if((double)cnt[i]/zcounts[i]>=lim[state[i]]) {
            Q[state[i]++][i]=de->x;
            if(state[i]==3) ct++;
          }
        }
        cnt[i]+=de->ct[i];
      }
    }
    double ztot=(double)(st->paired_type[0]+st->paired_type[1]+st->paired_type[2]);
    fprintf(f,"Read pairs on different strand (DS):\t%" PRIu64 "\t(%g%%)\n",
     st->paired_type[PAIR_TYPE_DS],100.0*(double)st->paired_type[PAIR_TYPE_DS]/ztot);
    fprintf(f,"Read pairs on same strand (SS):\t%" PRIu64 "\t(%g%%)\n",
     st->paired_type[PAIR_TYPE_SS],100.0*(double)st->paired_type[PAIR_TYPE_SS]/ztot);
    fprintf(f,"Read pairs on different contigs:\t%" PRIu64 "\t(%g%%)\n",
     st->paired_type[PAIR_TYPE_MM],100.0*(double)st->paired_type[PAIR_TYPE_MM]/ztot);
    fputs("\nInsert size summary\n\n",f);
    fprintf(f,"Selected unique read pairs:\t(%g)\tQ1: %" PRId64 "\tMedian: %" PRId64 "\tQ3: %" PRId64 "\n",zcounts[0],Q[0][0],Q[1][0],Q[2][0]);
    fprintf(f,"All unique read pairs:\t(%g)\tQ1: %" PRId64 "\tMedian: %" PRId64 "\tQ3: %" PRId64 "\n",zcounts[1],Q[0][1],Q[1][1],Q[2][1]);
    fprintf(f,"Selected unique read pairs with recovered read:\t(%g)\tQ1: %" PRId64 "\tMedian: %" PRId64 "\tQ3: %" PRId64 "\n",zcounts[2],Q[0][2],Q[1][2],Q[2][2]);
    fprintf(f,"Selected unique read pairs with split reads:\t(%g)\tQ1: %" PRId64 "\tMedian: %" PRId64 "\tQ3: %" PRId64 "\n",zcounts[3],Q[0][3],Q[1][3],Q[2][3]);
  } else {
    // Single-end layout
    fprintf(f,"Uniquely mapping reads:\t%" PRIu64 "\t(%g%%)\n",st->unique[0],100.0*(double)st->unique[0]/z);
    uint64_t mult=st->mapped[0]-st->unique[0]-st->ambiguous[0];
    fprintf(f,"Multiply mapping reads:\t%" PRIu64 "\t(%g%%)\n",mult,100.0*(double)mult/z);
    uint64_t unmap=st->nreads-st->mapped[0];
    fprintf(f,"Unmapped reads:\t%" PRIu64 "\t(%g%%)\n",unmap,100.0*(double)unmap/z);
    fprintf(f,"Ambiguous mapping reads:\t%" PRIu64 "\t(%g%%)\n\n",st->ambiguous[0],100.0*(double)st->ambiguous[0]/z);
    fprintf(f,"Reads with splitmaps:\t%" PRIu64 "\t(%g%%)\n",st->reads_with_splitmaps[0],100.0*(double)st->reads_with_splitmaps[0]/z);
    fprintf(f,"Reads with only splitmaps:\t%" PRIu64 "\t(%g%%)\n",st->reads_only_with_splitmaps[0],100.0*(double)st->reads_only_with_splitmaps[0]/z);
  }
}

/*
 * Print the post-trimming read-length distribution table (one or two read
 * columns depending on pairing).  Returns early if no reads were seen.
 */
static void as_print_read_lengths(FILE *f,as_param *param)
{
  as_stats *st=param->stats[0];
  bool paired=gt_input_generic_parser_attributes_is_paired(param->parser_attr);
  if(!st->max_read_length[0]) return;
  fprintf(f,"\n\nDistribution of reads lengths after trimming\n");
  uint64_t i,l,j,k;
  if(paired) {
    fputs("Read length\tR1:n_reads\tR1:p\tR2:nreads\tR2:p\n",f);
    j=2;
    l=st->max_read_length[0]>st->max_read_length[1]?st->max_read_length[0]:st->max_read_length[1];
  } else {
    fputs("Read length\tn_reads\tp\n",f);
    j=1;
    l=st->max_read_length[0];
  }
  // Column totals for the frequency denominators
  double tot[2]={0.0,0.0};
  for(i=0;i<=l;i++) {
    for(k=0;k<j;k++) {
      if(i<=st->max_read_length[k]) tot[k]+=(double)st->read_length_stats[k][i];
    }
  }
  uint64_t x[2];
  for(i=0;i<=l;i++) {
    for(k=0;k<j;k++) x[k]=(i<=st->max_read_length[k]?st->read_length_stats[k][i]:0);
    if(paired) {
      if(x[0]||x[1]) {
        fprintf(f,"%" PRIu64 "\t%" PRIu64 "\t%.4f\t%" PRIu64 "\t%.4f\n",i,x[0],(double)x[0]/tot[0],x[1],(double)x[1]/tot[1]);
      }
    } else if(x[0]) {
      fprintf(f,"%" PRIu64 "\t%" PRIu64 "\t%.4f\n",i,x[0],(double)x[0]/tot[0]);
    }
  }
}

/*
 * Write the fragment-size distribution table to param->dist_file (or stdout),
 * optionally compressed via the gt_output_file compression setting.
 */
static void as_print_distance_file(as_param *param)
{
  as_stats* st=param->stats[0];
  gt_output_file *file;
  if(param->dist_file) {
    file=gt_output_file_new_compress(param->dist_file,UNSORTED_FILE,param->compress);
  } else {
file=gt_output_stream_new_compress(stdout,UNSORTED_FILE,param->compress);
  }
  gt_cond_fatal_error(!file,FILE_OPEN,param->dist_file);
  FILE *fp=file->file;
  // Category totals used as frequency denominators
  uint64_t counts[4]={0,0,0,0};
  dist_element *de;
  int j;
  for(de=st->insert_size;de;de=de->hh.next) {
    for(j=0;j<4;j++) counts[j]+=de->ct[j];
  }
  double zcounts[4];
  for(j=0;j<4;j++) zcounts[j]=(double)counts[j];
  fputs("Fragment size distribution (uniquely mapping reads):\n\n",fp);
  fputs("Size\tPaired\tAll\tRecovered\tSplit\tPaired_freq\tAll_freq\tRecovered_freq\tSplit_freq\n",fp);
  for(de=st->insert_size;de;de=de->hh.next) {
    fprintf(fp,"%" PRId64 "\t%" PRIu64 "\t%" PRIu64 "\t%" PRIu64 "\t%" PRIu64 "\t%g\t%g\t%g\t%g\n",
     de->x,de->ct[0],de->ct[1],de->ct[2],de->ct[3],
     (double)de->ct[0]/zcounts[0],(double)de->ct[1]/zcounts[1],(double)de->ct[2]/zcounts[2],(double)de->ct[3]/zcounts[3]);
  }
  gt_output_file_close(file);
}

/*
 * Print the duplicate-rate summary computed by as_calc_duplicate_rate():
 * overall/optical rates, pairs used, and the library-size estimate.
 */
static void as_print_duplicate_summary(FILE *fp,as_param *param)
{
  as_stats* st=param->stats[0];
  fprintf(fp,"\nOverall duplicate percentage = %g%%\nOptical duplicate fraction = %g\nNo. read pairs used = %"PRIu64"\n",100.0*st->duplicate_rate[0],st->duplicate_rate[1],st->duplicate_reads_used);
  fprintf(fp,"Estimated number of unique fragments in library = %"PRIu64"\n",st->unique_fragment_estimate);
}

/*
 * Print the per-copy-number duplicate histogram (duplicate_counts) with the
 * within/between-tile same/opposite-strand pair breakdowns.  Silent when no
 * duplicates beyond single copies were seen.
 */
static void as_print_detailed_duplicate_report(FILE *fp,as_param *param)
{
  as_stats* st=param->stats[0];
  int k=0;
  int i;
  double z=0.0;
  for(i=0;i<=DUP_LIMIT;i++) {
    uint64_t c=st->duplicate_counts[0][i];
    if(c) {
      z+=(double)c;
      k=i; // highest occupied copy-number bin
    }
  }
  if(k) {
    fputs("\nDetailed duplicate report\nN_copies\tfreq\tprob\tW++\tW+-\tB++\tB+-\n",fp);
    for(i=0;i<=k;i++) {
      if(i==DUP_LIMIT) fputs(">=",fp); // last bin aggregates >= DUP_LIMIT+1 copies
      fprintf(fp,"%d\t%"PRIu64"\t%g\t%"PRIu64"\t%"PRIu64"\t%"PRIu64"\t%"PRIu64"\n",i+1,st->duplicate_counts[0][i],(double)st->duplicate_counts[0][i]/z,
       st->duplicate_counts[1][i],st->duplicate_counts[2][i],st->duplicate_counts[3][i],st->duplicate_counts[4][i]);
    }
  }
}

/*
 * Print the mismatch report: overall error percentages, base composition,
 * ts/tv ratio, previous-base-copy probability, plus per-quality and
 * per-cycle mismatch profiles for each read end.
 */
static void as_print_mismatch_report(FILE *fp,as_param *param)
{
  const double ln10=log(10.0);
  as_stats* st=param->stats[0];
  bool paired=gt_input_generic_parser_attributes_is_paired(param->parser_attr);
  // Collect overall error stats
  uint64_t mm_stats[2][MAX_QUAL+1],qual_stats[2][MAX_QUAL+1];
  uint64_t mm_total[2]={0,0};
  uint64_t qtotal[2]={0,0},total[2]={0,0},tv_stats[2]={0,0},ts_stats[2]={0,0},pbc_stats[2]={0,0},pbn_stats[2]={0,0};
  uint64_t base_ct[2][5],base_ct1[2][5*(MAX_QUAL+1)],base_ctt[2][MAX_QUAL+1];
  uint64_t i,j,k,nr;
  nr=paired?2:1;
  for(i=0;i<nr;i++) {
    for(j=0;j<5;j++) base_ct[i][j]=0;
    for(j=0;j<=MAX_QUAL;j++) {
      // Sum the per-cycle rows into per-quality totals
      uint64_t *tp=st->mm_stats[i*(MAX_QUAL+1)+j];
      uint64_t *tq=st->qual_stats[i*(MAX_QUAL+1)+j];
      uint64_t tmp_mm=0,tmp=0;
      uint64_t tt[]={0,0,0,0,0};
      for(k=0;k<st->max_read_length[i];k++) {
        uint64_t *tb=st->base_counts_by_cycle[i][k]+j*5;
        int k1;
        for(k1=0;k1<5;k1++) tt[k1]+=tb[k1];
        tmp_mm+=tp[k];
        tmp+=tq[k];
      }
      uint64_t tt1=0;
      for(k=0;k<5;k++) {
        base_ct[i][k]+=tt[k];
        base_ct1[i][j*5+k]=tt[k];
        tt1+=tt[k];
      }
      base_ctt[i][j]=tt1;
      tv_stats[i]+=st->tv_stats[i][j];
      ts_stats[i]+=st->ts_stats[i][j];
pbc_stats[i]+=st->pbc_stats[i][j];
      pbn_stats[i]+=st->pbn_stats[i][j];
      mm_stats[i][j]=tmp_mm;
      qual_stats[i][j]=tmp;
      mm_total[i]+=tmp_mm;
      qtotal[i]+=tmp;
    }
    for(j=0;j<5;j++) total[i]+=base_ct[i][j];
  }
  fputs("\nMismatch report (based on first alignment only)\n\n",fp);
  if(paired) {
    fprintf(fp,"Overall Mismatch percentage (Read 1 Read 2):\t%g%%\t%g%%\n",100.0*mm_total[0]/qtotal[0],100.0*mm_total[1]/qtotal[1]);
    fprintf(fp,"Overall base composition:\t(A:%.3f,C:%.3f,G:%.3f,T:%.3f,N:%.3f)\t(A:%.3f,C:%.3f,G:%.3f,T:%.3f,N:%.3f)\n",
     (double)base_ct[0][1]/total[0],(double)base_ct[0][2]/total[0],(double)base_ct[0][3]/total[0],(double)base_ct[0][4]/total[0],(double)base_ct[0][0]/total[0],
     (double)base_ct[1][1]/total[1],(double)base_ct[1][2]/total[1],(double)base_ct[1][3]/total[1],(double)base_ct[1][4]/total[1],(double)base_ct[1][0]/total[1]);
    fprintf(fp,"Overall transition:transversion ratio:\t%g\t%g\n",(double)ts_stats[0]/tv_stats[0],(double)ts_stats[1]/tv_stats[1]);
    fprintf(fp,"Overall probability of mismatch being a copy of previous base:\t%g\t%g\n",(double)pbc_stats[0]/(pbc_stats[0]+pbn_stats[0]),(double)pbc_stats[1]/(pbc_stats[1]+pbn_stats[1]));
  } else {
    fprintf(fp,"Overall mismatch percentage:\t%g%%\n",100.0*mm_total[0]/qtotal[0]);
    fprintf(fp,"Overall base composition:\t(A:%.3f,C:%.3f,G:%.3f,T:%.3f,N:%.3f)\n",
     (double)base_ct[0][1]/total[0],(double)base_ct[0][2]/total[0],(double)base_ct[0][3]/total[0],(double)base_ct[0][4]/total[0],(double)base_ct[0][0]/total[0]);
    fprintf(fp,"Overall transition:transversion ratio:\t%g\n",(double)ts_stats[0]/tv_stats[0]);
    fprintf(fp,"Overall probability of mismatch being a copy of previous base:\t%g\n",(double)pbc_stats[0]/(pbc_stats[0]+pbn_stats[0]));
  }
  for(i=0;i<nr;i++) {
    // Per-quality mismatch profile for read end i
    fprintf(fp,"\nMismatch quality profile - Read %"PRIu64"\n\n",i+1);
    fputs("Qual\tn_bases\tp(bases)\tcp(bases)\tn_mm\tp(mm)\t-log10_p(mm)\tts:tv\tp(pbc)\tp(A)\tp(C)\tp(G)\tp(T)\n",fp);
    uint64_t ttot=0;
    for(j=0;j<=MAX_QUAL;j++) {
      if(qual_stats[i][j]) {
        double z=(double)mm_stats[i][j]/qual_stats[i][j];
        ttot+=qual_stats[i][j]; // cumulative base count for cp(bases)
        uint64_t tt=st->pbc_stats[i][j]+st->pbn_stats[i][j];
        fprintf(fp,"%"PRIu64"\t%"PRIu64"\t%g\t%g\t%"PRIu64"\t%g\t%g\t%g\t%g\t%g\t%g\t%g\t%g\n",j,qual_stats[i][j],(double)qual_stats[i][j]/qtotal[i],
         (double)ttot/qtotal[i],mm_stats[i][j],z,-log(z)/ln10,st->tv_stats[i][j]?(double)st->ts_stats[i][j]/st->tv_stats[i][j]:0.0,
         tt?(double)st->pbc_stats[i][j]/tt:0.0,(double)base_ct1[i][j*5+1]/base_ctt[i][j],(double)base_ct1[i][j*5+2]/base_ctt[i][j],
         (double)base_ct1[i][j*5+3]/base_ctt[i][j],(double)base_ct1[i][j*5+4]/base_ctt[i][j]);
      }
    }
    // Per-cycle mismatch profile for read end i
    fprintf(fp,"\nMismatch read position profile - Read %"PRIu64"\n\n",i+1);
    fputs("Pos\tn_bases\tp(bases)\tavg_qual\tn_mm\tp(mm)\tp(A)\tp(C)\tp(G)\tp(T)\tp(N)\n",fp);
    uint64_t len=st->max_read_length[i];
    for(j=0;j<len;j++) {
      uint64_t mms[5]={0,0,0,0,0};
      uint64_t *bc=st->base_counts_by_cycle[i][j];
      uint64_t qs=0,mm=0;
      double qsmn=0.0;
      for(k=0;k<=MAX_QUAL;k++) {
        mm+=st->mm_stats[i*(MAX_QUAL+1)+k][j];
        qs+=st->qual_stats[i*(MAX_QUAL+1)+k][j];
        qsmn+=(double)k*st->qual_stats[i*(MAX_QUAL+1)+k][j];
        int k1;
        for(k1=0;k1<5;k1++) mms[k1]+=bc[k*5+k1];
      }
      if(qs) {
        qsmn/=(double)qs; // mean quality at this cycle
        uint64_t tt=0;
        for(k=0;k<5;k++) tt+=mms[k];
        fprintf(fp,"%"PRIu64"\t%"PRIu64"\t%g\t%.1f\t%"PRIu64"\t%g\t%g\t%g\t%g\t%g\t%g\n",j,qs,(double)qs/qtotal[i],qsmn,mm,(double)mm/qs,
         (double)mms[1]/tt,(double)mms[2]/tt,(double)mms[3]/tt,(double)mms[4]/tt,(double)mms[0]/tt);
      }
    }
  }
}

/*
 * Open the main report (output_file or stdout, possibly compressed) and emit
 * all report sections in order; also writes the separate distance file when
 * input is paired.
 */
static void as_print_stats(as_param *param)
{
  gt_output_file *file;
  if(param->output_file) {
    file=gt_output_file_new_compress(param->output_file,UNSORTED_FILE,param->compress);
  } else {
    file=gt_output_stream_new_compress(stdout,UNSORTED_FILE,param->compress);
  }
  gt_cond_fatal_error(!file,FILE_OPEN,param->output_file);
  FILE *fp=file->file;
  if(gt_input_generic_parser_attributes_is_paired(param->parser_attr)) as_print_distance_file(param);
  as_print_yield_summary(fp,param);
  as_print_mapping_summary(fp,param);
  as_print_duplicate_summary(fp,param);
as_print_mismatch_report(fp,param);
  as_print_read_lengths(fp,param);
  as_print_detailed_duplicate_report(fp,param);
  gt_output_file_close(file);
}

/*
 * Program entry point: parse command-line options, read the MAP input
 * (either one generic file or two synchronized MAP files, one per end),
 * collect per-thread statistics (OpenMP when available), then merge stats
 * and compute the duplicate rate on two worker pthreads before printing
 * the reports.  Returns 0 on success, a negative code on option errors.
 */
int main(int argc,char *argv[])
{
  int err=0,c;
  char *p,*p1;
  static struct option longopts[]={
    {"reads",required_argument,0,'r'},
    {"insert_dist",required_argument,0,'d'},
    {"max_insert",required_argument,0,'m'},
    {"min_insert",required_argument,0,'M'},
    // NOTE(review): duplicate entry - "insert_dist" already appears above
    {"insert_dist",required_argument,0,'d'},
    {"phage_lambda",required_argument,0,'P'},
    {"phix174",required_argument,0,'X'},
    {"paired",no_argument,0,'p'},
    {"variable",no_argument,0,'V'},
    {"ignore_id",no_argument,0,'i'},
    {"mmap",no_argument,0,'w'},
    {"fastq",no_argument,0,'F'},
    {"solexa",no_argument,0,'S'},
    {"gzip",no_argument,0,'z'},
    {"bzip2",no_argument,0,'j'},
    {"no-compress",no_argument,0,'Z'},
    {"threads",required_argument,0,'t'},
    {"qual_off",required_argument,0,'q'},
    {"output",required_argument,0,'o'},
    {"read_length",required_argument,0,'l'},
    {"max_read_length",required_argument,0,'L'},
    {"help",no_argument,0,'h'},
    {"usage",no_argument,0,'h'},
    {0,0,0,0}
  };
  as_param param = {
    .input_files={NULL,NULL},
    .output_file=NULL,
    .dist_file=NULL,
    .phage_lambda=NULL,
    .phix174=NULL,
    .mmap_input=false,
    .parser_attr=gt_input_generic_parser_attributes_new(false),
    .ignore_id=false,
    .compress=NONE,
    .min_insert=0,
    .max_insert=DEFAULT_MAX_INSERT,
    .variable_read_length=false,
    .read_length={0,0},
    .max_read_length=MAX_READ_LENGTH,
    .num_threads=1,
    .qual_offset=DEFAULT_QUAL_OFFSET,
  };
  // NOTE(review): optstring contains "x:" but there is no case 'x' below - verify
  while(!err && (c=getopt_long(argc,argv,"d:t:r:o:q:m:M:l:L:x:P:X:FSVzjZwpi?",longopts,0))!=-1) {
    switch(c) {
    case 'd':
      set_opt("insert_dist",&param.dist_file,optarg);
      break;
    case 'p':
      gt_input_generic_parser_attributes_set_paired(param.parser_attr,true);
      break;
    case 'o':
      set_opt("output",&param.output_file,optarg);
      break;
    case 'P':
      set_opt("phage_lambda",&param.phage_lambda,optarg);
      break;
    case 'X':
      set_opt("phix174",&param.phix174,optarg);
      break;
    case 'L':
      param.max_read_length=(uint64_t)strtoul(optarg,&p,10);
      break;
    case 'l':
      // Either one length or "len1,len2"
      param.read_length[0]=(uint64_t)strtoul(optarg,&p,10);
      if(*p==',') param.read_length[1]=(uint64_t)strtoul(p+1,&p1,10);
      else param.read_length[1]=param.read_length[0];
      break;
    case 'z':
#ifdef HAVE_ZLIB
      param.compress=GZIP;
#endif
      break;
    case 'j':
#ifdef HAVE_BZLIB
      param.compress=BZIP2;
#endif
      break;
    case 'Z':
      param.compress=NONE;
      break;
    case 'q':
      param.qual_offset=(int)strtol(optarg,&p,10);
      if(*p || param.qual_offset<0 || param.qual_offset>255) {
        fprintf(stderr,"Illegal quality value adjustment: '%s'\n",optarg);
        err=-7;
      }
      break;
    case 'm':
      param.max_insert=(uint64_t)strtoul(optarg,&p,10);
      break;
    case 'M':
      param.min_insert=(uint64_t)strtoul(optarg,&p,10);
      break;
    case 'F':
      param.qual_offset=QUAL_FASTQ;
      break;
    case 'S':
      param.qual_offset=QUAL_SOLEXA;
      break;
    case 'V':
      param.variable_read_length=true;
      break;
    case 'w':
      param.mmap_input=true;
      break;
    case 'i':
      param.ignore_id=true;
      break;
    case 't':
#ifdef HAVE_OPENMP
      param.num_threads=atoi(optarg);
#endif
      break;
    case 'r':
      if(param.input_files[0]) {
        fprintf(stderr,"multiple reads options: '%s' overwriting previous definition\n",optarg);
        // NOTE(review): previously strdup'ed strings are leaked here
        param.input_files[0]=0;
        param.input_files[1]=0;
      }
      p=strchr(optarg,',');
      if(p) {
        *p++=0;
        if(strchr(p,',')) {
          fprintf(stderr,"Alignment files should be specified either in comma separated pairs (paired end) or individually (single end or paired alignment)\n");
          err=-10;
        } else {
          param.input_files[0]=strdup(optarg);
          param.input_files[1]=strdup(p);
        }
      } else {
        param.input_files[0]=strdup(optarg);
      }
      break;
      // NOTE(review): the two statements below are unreachable (after break) -
      // possibly the remains of a lost 'default:' label
      fprintf(stderr,"Alignment files should be specified either in comma separated pairs (paired end) or individually (single end or paired alignment)\n");
      break;
    case 'h':
    case '?':
      usage(stdout);
      exit(0);
    }
  }
  if(!param.phage_lambda) param.phage_lambda=strdup(PHAGE_LAMBDA);
  if(!param.phix174) param.phix174=strdup(PHIX174);
  as_set_output_files(&param);
  as_stats** stats=as_malloc(param.num_threads*sizeof(void *));
  param.stats=stats;
  loc_hash *lh=0; // duplicate-location hash shared by all worker threads
  // Do we have two map files as input (one for each read)?
  if(param.input_files[1]) {
    gt_input_generic_parser_attributes_set_paired(param.parser_attr,true);
    pthread_mutex_t mutex=PTHREAD_MUTEX_INITIALIZER;
    gt_input_file* input_file1=gt_input_file_open(param.input_files[0],param.mmap_input);
    gt_input_file* input_file2=gt_input_file_open(param.input_files[1],param.mmap_input);
    if(input_file1->file_format!=MAP || input_file2->file_format!=MAP) {
      gt_fatal_error_msg("Fatal error: paired files '%s','%s' are not in MAP format\n",param.input_files[0],param.input_files[1]);
    }
#ifdef HAVE_OPENMP
#pragma omp parallel num_threads(param.num_threads)
    {
      uint64_t tid=omp_get_thread_num();
#else
    {
      uint64_t tid=0;
#endif
      gt_buffered_input_file* buffered_input1=gt_buffered_input_file_new(input_file1);
      gt_buffered_input_file* buffered_input2=gt_buffered_input_file_new(input_file2);
      gt_status error_code;
      gt_template *template=gt_template_new();
      id_tag *idt=new_id_tag();
      stats[tid]=as_stats_new(gt_input_generic_parser_attributes_is_paired(param.parser_attr));
      stats[tid]->loc_hash=&lh;
      // Read both files in lockstep; the mutex keeps block fetches in sync
      while(gt_input_map_parser_synch_blocks(buffered_input1,buffered_input2,&mutex)) {
        error_code=gt_input_map_parser_get_template(buffered_input1,template,NULL);
        if(error_code!=GT_IMP_OK) {
          gt_input_map_parser_get_template(buffered_input2,template,NULL);
          gt_error_msg("Error parsing file '%s'\n",param.input_files[0]);
          continue;
        }
        if(gt_template_get_num_blocks(template)!=1) {
          gt_error_msg("Error parsing files '%s','%s': wrong number of blocks\n",param.input_files[0],param.input_files[1]);
          continue;
        }
        gt_alignment *alignment2=gt_template_get_block_dyn(template,1);
        error_code=gt_input_map_parser_get_alignment(buffered_input2,alignment2,NULL);
        if (error_code!=GT_IMP_OK) {
          gt_error_msg("Error parsing file '%s'\n",param.input_files[1]);
          continue;
        }
        // Both ends must carry the same read ID prefix
        if(!(gt_string_nequals(template->tag,alignment2->tag,gt_string_get_length(template->tag)))) {
          gt_error_msg("Fatal ID mismatch ('%*s','%*s') parsing files '%s','%s'\n",PRIgts_content(template->tag),PRIgts_content(alignment2->tag),param.input_files[0],param.input_files[1]);
          break;
        }
        if(!param.ignore_id) {
          uint64_t idt_err=parse_id_tag(template->tag,idt);
          if(idt_err!=ID_TAG_OK) {
            gt_error_msg("Fatal error parsing ID '"PRIgts"'\n",PRIgts_content(template->tag));
            break;
          }
        }
        // Pair up every cross-product of end mappings within the insert limits
        gt_alignment *alignment1=gt_template_get_block(template,0);
        gt_mmap_attributes attr;
        gt_map *mmap[2];
        GT_ALIGNMENT_ITERATE(alignment1,map1) {
          mmap[0]=map1;
          GT_ALIGNMENT_ITERATE(alignment2,map2) {
            mmap[1]=map2;
            gt_status gt_err;
            int64_t x=gt_template_get_insert_size(mmap,&gt_err,0,0);
            if(gt_err==GT_TEMPLATE_INSERT_SIZE_OK && x>=param.min_insert && x<=param.max_insert) {
              attr.distance=gt_map_get_global_distance(map1)+gt_map_get_global_distance(map2);
              attr.gt_score=GT_MAP_NO_GT_SCORE;
              gt_template_inc_counter(template,attr.distance);
              gt_template_add_mmap_ends(template,map1,map2,&attr);
            }
          }
        }
        as_collect_stats(template,stats[tid],&param,idt);
      }
      gt_template_delete(template);
      gt_buffered_input_file_close(buffered_input1);
      gt_buffered_input_file_close(buffered_input2);
      free_id_tag(idt);
    }
    gt_input_file_close(input_file1);
    gt_input_file_close(input_file2);
  } else { // Single file (could be single or paired end)
    gt_input_file* input_file=param.input_files[0]?gt_input_file_open(param.input_files[0],param.mmap_input):gt_input_stream_open(stdin);
#ifdef HAVE_OPENMP
#pragma omp parallel num_threads(param.num_threads)
    {
      uint64_t tid=omp_get_thread_num();
#else
    {
      uint64_t tid=0;
#endif
      gt_buffered_input_file* buffered_input=gt_buffered_input_file_new(input_file);
      gt_status error_code;
      gt_template *template=gt_template_new();
      stats[tid]=as_stats_new(gt_input_generic_parser_attributes_is_paired(param.parser_attr));
      stats[tid]->loc_hash=&lh;
      id_tag *idt=new_id_tag();
      while ((error_code=gt_input_generic_parser_get_template(buffered_input,template,param.parser_attr))) {
        if (error_code!=GT_IMP_OK) {
          gt_error_msg("Error parsing file '%s'\n",param.input_files[0]);
          continue;
        }
        // For paired reads, insert single end mappings into alignments
        if(gt_input_generic_parser_attributes_is_paired(param.parser_attr)) {
          if(gt_template_get_num_blocks(template)!=2) {
            gt_fatal_error_msg("Fatal error: Expecting paired reads\n");
          }
          gt_alignment *al[2];
          al[0]=gt_template_get_block(template,0);
          al[1]=gt_template_get_block(template,1);
          gt_alignment_recalculate_counters(al[0]);
          gt_alignment_recalculate_counters(al[1]);
        }
        if(!param.ignore_id) {
          uint64_t idt_err=parse_id_tag(template->tag,idt);
          if(idt_err!=ID_TAG_OK) {
            gt_error_msg("Fatal error parsing ID '"PRIgts"'\n",PRIgts_content(template->tag));
            break;
          }
        }
        as_collect_stats(template,stats[tid],&param,idt);
      }
      // Clean
      gt_template_delete(template);
      gt_buffered_input_file_close(buffered_input);
      free_id_tag(idt);
    }
    gt_input_file_close(input_file);
  }
  // Run the merge and the duplicate-rate estimate on parallel worker threads
  pthread_t stats_merge;
  if(pthread_create(&stats_merge,NULL,as_merge_stats,&param)) {
    gt_error_msg("Fatal error - could not create new thread\n");
    exit(-1);
  }
  pthread_t calc_dup;
  if(pthread_create(&calc_dup,NULL,as_calc_duplicate_rate,&param)) {
    gt_error_msg("Fatal error - could not create new thread\n");
    exit(-1);
  }
  pthread_join(calc_dup,NULL);
  pthread_join(stats_merge,NULL);
  as_print_stats(&param);
  as_stats_free(stats[0]);
  free(stats);
  return err;
}
fig6.22-overlap-comp-io.c
/* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. Copyright 2009 Sun Microsystems, Inc. All rights reserved. The contents of this file are subject to the terms of the BSD License("BSD")(the "License"). You can obtain a copy of the License at: http://www.opensparc.net/pubs/t1/licenses/BSD+_License.txt The BSD License Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistribution of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistribution in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Sun Microsystems, Inc. or the names of contributors may be used to endorse or promote products derived from this software without specific prior written permission. This software is provided "AS IS," without a warranty of any kind. ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY, ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. You acknowledge that this software is not designed, licensed or intended for use in the design, construction, operation or maintenance of any nuclear facility. 
*/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <float.h> #include <malloc.h> #include <unistd.h> #include <time.h> #define TRUE 1 #define FALSE 0 #define FABS(x) (x < 0 ? -x : x) #ifdef _OPENMP #include <omp.h> #else #define omp_get_thread_num() 0 #endif int DefValN = 6; int DefValM = 10; char file_read[] = "fig6.22-file-io.bin"; enum STATUS {UNDEFINED, READ_IN_PROGRESS, READ_FINISHED, PROCESSING_IN_PROGRESS, PROCESSING_FINISHED} *execution_state; void read_input(int i); void signal_read(int i); void wait_read(int i); void process_data(int i); void signal_processed(int i); void wait_processed(int i); void write_output(int i); void do_compute(int i,int j); void get_cmd_line_options(int, char **); void init_memory(); void init_data(); void generate_input_file(); void compute_reference_results(); void print_header(); int check_results(); void print_state_array(int, char *, int); double *a, *b, **c, **ref; FILE *fp_write; int verbose; int N, M; int main(int argc, char **argv) { int error_count; /* ------------------------------------------------------------ This program runs fine using one thread, but if we do execute in parallel, at least 3 threads are needed. ------------------------------------------------------------ */ #ifdef _OPENMP int thread_count_error; (void) omp_set_dynamic(FALSE); if (omp_get_dynamic()) {printf("Warning: dynamic adjustment of threads has been set\n");} (void) omp_set_num_threads(3); (void) omp_set_nested(TRUE); if (! 
omp_get_nested()) {printf("Warning: nested parallelism not set\n");} #pragma omp parallel shared(thread_count_error) { #pragma omp single { if ( omp_get_num_threads() < 3 ) { printf("Fatal error - At least 3 threads are needed, but only %d available\n", omp_get_num_threads()); thread_count_error = TRUE; } else { thread_count_error = FALSE; } } } /*-- End of parallel region --*/ if ( thread_count_error == TRUE ) return(-1); #endif /* ------------------------------------------------------------ Initial, serial, phase. Get the command line options, allocate memory, initialize data, generate input file and print the header. ------------------------------------------------------------ */ (void) get_cmd_line_options(argc,argv); if (verbose) printf("Allocating memory for data structures\n"); (void) init_memory(); if (verbose) printf("Memory for data structures allocated\n"); if (verbose) printf("Initializing data structures\n"); (void) init_data(); if (verbose) printf("Data structures initialized\n"); (void) generate_input_file(); (void) compute_reference_results(); (void) print_header(); /* ------------------------------------------------------------ This implements the pipeline, overlapping I/O and computations. Each section block handles a specific phase. 
------------------------------------------------------------ */ #pragma omp parallel sections { #pragma omp section { if (verbose) printf("TID = %d - in main: performs the read operations\n",omp_get_thread_num()); for (int i=0; i<N; i++) { (void) read_input(i); (void) signal_read(i); } } #pragma omp section { if (verbose) printf("TID = %d - in main: performs the computations\n",omp_get_thread_num()); for (int i=0; i<N; i++) { (void) wait_read(i); (void) process_data(i); (void) signal_processed(i); } } #pragma omp section { if (verbose) printf("TID = %d - in main: performs the write operations\n",omp_get_thread_num()); for (int i=0; i<N; i++) { (void) wait_processed(i); (void) write_output(i); } } } /*-- End of parallel sections --*/ if ( (error_count = check_results()) == 0 ) { printf("Program executed successfully\n"); } else { printf("FATAL ERROR: found %d differences in the result(s)\n",error_count); } free(a); free(b); free(c); free(ref); free(execution_state); return(0); } /*-- End of main program --*/ void read_input(int i) { FILE *fp_read; execution_state[i] = READ_IN_PROGRESS; #pragma omp flush print_state_array(omp_get_thread_num(),"read_input",i); /* ------------------------------------------------------------ Note that this is clumsy by design in order to get some time spent in the I/O part. 
------------------------------------------------------------ */ if ( (fp_read = fopen(file_read,"r")) != NULL ) { if ( fseek(fp_read,(long) i*2*sizeof(double), SEEK_SET) == 0 ) { if ( fread(&a[i],sizeof(double),1,fp_read) != 1 ) { perror("read_input: array a"); exit(1); } if ( fread(&b[i],sizeof(double),1,fp_read) != 1 ) { perror("read_input: array b"); exit(1); } if (verbose) { printf("TID = %d - in read_input: a[%d]=%f b[%d]=%f\n", omp_get_thread_num(),i,a[i],i,b[i]); } } else { perror("read_input: seek error"); exit(1); } fclose(fp_read); } else { perror("read_input: open file for read"); } } /*-- End of read_input --*/ void signal_read(int i) { print_state_array(omp_get_thread_num(),"signal_read",i); execution_state[i] = READ_FINISHED; #pragma omp flush print_state_array(omp_get_thread_num(),"signal_read",i); } /*-- End of signal_read --*/ void wait_read(int i) { print_state_array(omp_get_thread_num(),"wait_read",i); #pragma omp flush while ( execution_state[i] != READ_FINISHED ) { print_state_array(omp_get_thread_num(),"wait_read",i); system("sleep 1"); #pragma omp flush } print_state_array(omp_get_thread_num(),"wait_read",i); } /*-- End of wait_read --*/ void process_data(int i) { int TID_LVL_1 = omp_get_thread_num(); execution_state[i] = PROCESSING_IN_PROGRESS; #pragma omp flush print_state_array(TID_LVL_1,"process_data",i); #pragma omp parallel for num_threads(4) for (int j=0 ; j<M; j++) { if (verbose) printf("TID:subTID = %d:%d - in process_data: iteration j=%d\n", TID_LVL_1,omp_get_thread_num(),j); do_compute(i,j); } } /*-- End of process_data --*/ void do_compute(int i,int j) { c[i][j] += a[i] + b[i]; if (verbose) { printf("\tin do_compute: updated c[%d][%d]\n",i,j); } } /*-- End of do_compute --*/ void signal_processed(int i) { execution_state[i] = PROCESSING_FINISHED; #pragma omp flush print_state_array(omp_get_thread_num(),"signal_processed",i); } /*-- End of signal_processed --*/ void wait_processed(int i) { 
print_state_array(omp_get_thread_num(),"wait_processed",i); #pragma omp flush while ( execution_state[i] != PROCESSING_FINISHED ) { print_state_array(omp_get_thread_num(),"wait_processed",i); system("sleep 1"); #pragma omp flush } print_state_array(omp_get_thread_num(),"wait_processed",i); } /*-- End of wait_processed --*/ void write_output(int i) { int CutOffPrint = 9; /*-- Limits number of values printed --*/ print_state_array(omp_get_thread_num(),"write_output",i); if (verbose) { printf("TID = %d - in write_output: i=%d\n",omp_get_thread_num(),i); printf("\tc[%d][]: ",i); if ( M < CutOffPrint ) { for (int j=0; j<M; j++) printf("%.2f ",c[i][j]); } else { for (int j=0; j<CutOffPrint; j++) printf("%.2f ",c[i][j]); printf("... %.2f ",c[i][M-1]); } printf("\n"); } } /*-- End of write_output --*/ void generate_input_file() { if ( (fp_write = fopen(file_read,"w")) != NULL ) { for(int i=0; i<N; i++) { fwrite(&a[i],sizeof(double),1,fp_write); fwrite(&b[i],sizeof(double),1,fp_write); } fclose(fp_write); } else { perror("generate_input_file: open file for write"); exit(1); } } /*-- End of generate_input --*/ void compute_reference_results() { for (int i=0; i<N; i++) for (int j=0; j<M; j++) ref[i][j] = 0.0; for (int i=0; i<N; i++) for (int j=0; j<M; j++) ref[i][j] += a[i] + b[i]; } /*-- End of compute_reference_results --*/ int check_results() { double rel_error; double TOL = DBL_EPSILON*10.0; int error_count = 0; for (int i=0; i<N; i++) for (int j=0; j<M; j++) { if (FABS(ref[i][j]) > DBL_MAX) rel_error = FABS( (c[i][j] - ref[i][j])/ref[i][j] ); else rel_error = FABS( c[i][j] - ref[i][j] ); if (rel_error > TOL ) { error_count++; printf("c[%d][%d] = %f ref[%d][%d] = %f rel. 
error = %e\n", i,j,c[i][j],i,j,ref[i][j],rel_error); } } return(error_count); } /*-- End of check_results --*/ void init_memory() { if ( (a = (double *) malloc(N*sizeof(double))) == NULL ) { perror("init_memory: memory allocation failure for a"); exit(1); } else { if (verbose) printf("\tAllocated memory for a\n"); } if ( (b = (double *) malloc(N*sizeof(double))) == NULL ) { perror("init_memory: memory allocation failure for b"); exit(1); } else { if (verbose) printf("\tAllocated memory for b\n"); } if ( (c = (double **) malloc(N*sizeof(double))) == NULL ) { perror("init_memory: memory allocation failure for c"); exit(1); } else { for (int i=0; i<N; i++) if ( (c[i] = malloc(M*sizeof(double))) == NULL ) { perror("init_memory: memory allocation failure for c"); exit(1); } } if (verbose) printf("\tAllocated memory for c\n"); if ( (ref = (double **) malloc(N*sizeof(double))) == NULL ) { perror("init_memory: memory allocation failure for ref"); exit(1); } else { for (int i=0; i<N; i++) if ( (ref[i] = malloc(M*sizeof(double))) == NULL ) { perror("init_memory: memory allocation failure for ref"); exit(1); } } if (verbose) printf("\tAllocated memory for ref\n"); if ( (execution_state = malloc(N*sizeof(int))) == NULL ) { perror("init_memory: memory allocation failure for execution_state"); exit(1); } else { if (verbose) printf("\tAllocated memory for execution_state\n"); } } /*-- End of init_memory --*/ void init_data() { for (int i=0; i<N; i++) a[i] = i+1; for (int i=0; i<N; i++) b[i] = a[N-1] + i+1; for (int i=0; i<N; i++) for (int j=0; j<M; j++) c[i][j] = 0.0; for (int i=0; i<N; i++) execution_state[i] = UNDEFINED; #pragma omp flush } /*-- End of init_data --*/ void get_cmd_line_options(int argc, char **argv) { char optstring[]="N:M:hv"; int c; extern char *optarg; extern int opterr; N = DefValN; M = DefValM; verbose = FALSE; if ( argc > 1 ) { opterr = 0; while ((c = getopt(argc, argv, optstring)) != EOF) { switch (c) { case 'N': N = atoi(optarg); break; case 'M': M = 
atoi(optarg); break; case 'v': verbose = TRUE; break; case 'h': printf("Usage:%s [-N <n>] [-M <m>] [-v] [-h]\n\n",argv[0]); printf("Options supported:\n"); printf(" <N> problem size (optional - default is %d)\n",DefValN); printf(" <M> problem size (optional - default is %d)\n",DefValM); printf(" <v> activates verbose mode (optional- by default it is off)\n"); printf(" <h> display this usage overview\n"); exit(0); break; case '?': printf("Warning: incomplete or incorrect option(s) ignored\n"); break; } /*-- End of switch over options --*/ } /*-- End of while --*/ } /*-- End of if over argc --*/ if (verbose) printf("N=%d M=%d\n",N,M); return; } /*-- End of get_cmd_line_options --*/ void print_state_array(int TID, char *name, int i) { static int first = TRUE; #pragma omp critical { if (first) { first = FALSE; #pragma omp flush printf("Thread ID Function Execution Status Array\n"); printf(" Value of i:"); for (int j=0; j<N; j++) printf("%3d",j); printf("\n\n"); } printf("%6d %-20s",TID,name); if ( i >= 0 ) { for (int j=0; j<i; j++) { #pragma omp flush printf(" %2d",execution_state[j]); } #pragma omp flush printf(" *%1d",execution_state[i]); } for (int j=i+1; j<N; j++) { #pragma omp flush printf(" %2d",execution_state[j]); } printf("\n"); } /*-- End of critical section --*/ return; } /*-- End of print_state_array --*/ void print_header() { printf("This program demonstrates how I/O can be overlapped with computations.\n"); printf("Several options are supported. Use the -h option for an overview.\n"); printf("\n"); printf("There are 3 distinct phases. 
Each phase is assigned to a different thread.\n"); printf("Correct execution is independent of the assignment of a phase to a specific thread.\n"); printf("\n"); printf("These are the different phases:\n"); printf("Input phase: read_input - signal_read\n"); printf("Computational phase: wait_read - process_data - signal_processed\n"); printf("Output phase: wait_processed - write_output\n"); printf("\n"); printf("The entire operation is splitted into chunks. A specific chunk is represented\n"); printf("by the value of iteration i. In total there are %d chunks\n",N); printf("\n"); printf("An internal status flag is used to pass on information between the threads\n"); printf("regarding a specific phase.\n"); printf("\n"); printf("Legend for status flag\n"); printf("\t%d - UNDEFINED\n" ,UNDEFINED); printf("\t%d - READ_IN_PROGRESS\n" ,READ_IN_PROGRESS); printf("\t%d - READ_FINISHED\n" ,READ_FINISHED); printf("\t%d - PROCESSING_IN_PROGRESS\n",PROCESSING_IN_PROGRESS); printf("\t%d - PROCESSING_FINISHED\n" ,PROCESSING_FINISHED); printf("\n"); printf("The table below displays what function a specific thread is executing,\n"); printf("as well as the value of the status flag for a values of i.\n"); printf("\n"); printf("The * symbol indicates the current value of i the function is working on\n"); printf("\n"); print_state_array(omp_get_thread_num(),"initialization",-1); } /*-- End of print_header --*/
test.c
#include <stdio.h> #include <omp.h> #pragma omp requires unified_shared_memory #include "../utilities/check.h" #include "../utilities/utilities.h" #define TRIALS (1) #define N (992) #define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;}) #define ZERO(X) ZERO_ARRAY(N, X) int check_results(double* A){ for (int i = 0 ; i < N ; i++){ if (A[i] != TRIALS){ printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); return 0; } } return 1; } int check_results_priv(double *A, double *B){ for(int i = 0 ; i < N ; i++) { if (A[i] != TRIALS*3) { printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) TRIALS*2, A[i]); return 0; } if (B[i] != TRIALS*7) { printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) TRIALS*3, B[i]); return 0; } } return 1; } #define CODE() \ ZERO(A); \ success = 0; \ for (int t = 0 ; t < TRIALS ; t++) { \ _Pragma("omp target") \ _Pragma("omp teams distribute CLAUSES") \ for (int i = 0 ; i < N ; i++){ \ A[i] += C[i]; \ } \ } \ success += check_results(A); \ if (success == expected) \ printf("Succeeded\n"); #define CODE_PRIV() \ ZERO(A); \ ZERO(B); \ p = 2.0; \ q = 4.0; \ success = 0; \ for (int t = 0 ; t < TRIALS ; t++) { \ _Pragma("omp target") \ _Pragma("omp teams distribute CLAUSES") \ for (int i = 0 ; i < N ; i++){ \ p = 3; \ q = 7; \ A[i] += p; \ B[i] += q; \ } \ } \ success += check_results_priv(A, B); \ if (success == expected) \ printf("Succeeded\n"); int main(void) { check_offloading(); double A[N], B[N], C[N], D[N], E[N]; int fail = 0; int expected = 1; int success = 0; int chunkSize; double p = 2.0, q = 4.0; int nte, tl, blockSize; INIT(); // ************************** // Series 1: no dist_schedule // ************************** // // Test: #iterations == #teams // printf("iterations = teams\n"); #define CLAUSES num_teams(992) CODE() #undef CLAUSES printf("iterations > teams\n"); #define CLAUSES num_teams(256) CODE() #undef CLAUSES printf("iterations < teams\n"); #define CLAUSES num_teams(1024) CODE() #undef CLAUSES 
printf("num_teams(512) dist_schedule(static,1)\n"); #define CLAUSES num_teams(512) dist_schedule(static, 1) CODE() #undef CLAUSES printf("num_teams(512) dist_schedule(static,512)\n"); #define CLAUSES num_teams(512) dist_schedule(static, 512) CODE() #undef CLAUSES printf("num_teams(512) dist_schedule(static, chunkSize)\n"); chunkSize = N / 10; #define CLAUSES num_teams(512) dist_schedule(static, chunkSize) CODE() #undef CLAUSES printf("num_teams(1024) dist_schedule(static, chunkSize)\n"); chunkSize = N / 10; #define CLAUSES num_teams(1024) dist_schedule(static, chunkSize) CODE() #undef CLAUSES printf("num_teams(1024) dist_schedule(static, 1)\n"); #define CLAUSES num_teams(1024) dist_schedule(static, 1) CODE() #undef CLAUSES printf("num_teams(3) dist_schedule(static, 1)\n"); #define CLAUSES num_teams(3) dist_schedule(static, 1) CODE() #undef CLAUSES printf("num_teams(3) dist_schedule(static, 3)\n"); #define CLAUSES num_teams(3) dist_schedule(static, 3) CODE() #undef CLAUSES printf("num_teams(10) dist_schedule(static, 99)\n"); #define CLAUSES num_teams(10) dist_schedule(static, 99) CODE() #undef CLAUSES printf("num_teams(256) dist_schedule(static, 992)\n"); #define CLAUSES num_teams(256) dist_schedule(static, 992) CODE() #undef CLAUSES #if 0 printf("num_teams(256) private(p,q)\n"); #define CLAUSES num_teams(256) private(p,q) CODE_PRIV() #undef CLAUSES #endif // // Test: firstprivate // #if 0 printf("num_teams(64) firstprivate(p, q)\n"); ZERO(A); ZERO(B); p = 2.0, q = 4.0; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target // implicit firstprivate for p and q, their initial values being 2 and 4 for each target invocation #pragma omp teams distribute num_teams(64) firstprivate(p, q) for(int i = 0 ; i < 128 ; i++) { // 2 iterations for each team p += 3.0; // p and q are firstprivate to the team, and as such incremented twice (2 iterations per team) q += 7.0; A[i] += p; B[i] += q; } } for(int i = 0 ; i < 128 ; i++) { if (i % 2 == 0) { if (A[i] != (2.0+3.0)*TRIALS) { 
printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0)*TRIALS, A[i]); fail = 1; } if (B[i] != (4.0+7.0)*TRIALS) { printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) (4.0+7.0)*TRIALS, B[i]); fail = 1; } } else { if (A[i] != (2.0+3.0*2)*TRIALS) { printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0*2)*TRIALS, A[i]); fail = 1; } if (B[i] != (4.0+7.0*2)*TRIALS) { printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) (4.0+7.0*2)*TRIALS, B[i]); fail = 1; } } } if(fail) printf("Failed\n"); else printf("Succeeded\n"); #endif // // Test: lastprivate // printf("num_teams(10) lastprivate(lastpriv)\n"); success = 0; int lastpriv = -1; #pragma omp target map(tofrom:lastpriv) #pragma omp teams distribute num_teams(10) lastprivate(lastpriv) for(int i = 0 ; i < omp_get_num_teams() ; i++) lastpriv = omp_get_team_num(); if(lastpriv != 9) { printf("lastpriv value is %d and should have been %d\n", lastpriv, 9); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // *************************** // // Series 4: with parallel for // // *************************** // // Test: simple blocking loop // printf("num_teams(nte) thread_limit(tl) with parallel for innermost\n"); success = 0; ZERO(A); ZERO(B); nte = 32; tl = 64; blockSize = tl; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams distribute num_teams(nte) thread_limit(tl) for(int j = 0 ; j < 256 ; j += blockSize) { #pragma omp parallel for for(int i = j ; i < j+blockSize; i++) { A[i] += B[i] + C[i]; } } } for(int i = 0 ; i < 256 ; i++) { if (A[i] != TRIALS) { printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0)*TRIALS, A[i]); fail = 1; } } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: blocking loop where upper bound is not a multiple of tl*nte // printf("num_teams(nte) thread_limit(tl) with parallel for innermost\n"); success = 0; ZERO(A); ZERO(B); nte = 32; tl = 64; blockSize = tl; for (int t = 0 ; t < TRIALS ; t++) { 
#pragma omp target #pragma omp teams distribute num_teams(nte) thread_limit(tl) for(int j = 0 ; j < 510 ; j += blockSize) { int ub = (j+blockSize < 510) ? (j+blockSize) : 512; #pragma omp parallel for for(int i = j ; i < ub; i++) { A[i] += B[i] + C[i]; } } } for(int i = 0 ; i < 256 ; i++) { if (A[i] != TRIALS) { printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0)*TRIALS, A[i]); fail = 1; } } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // ************************** // Series 5: collapse // ************************** // // Test: 2 loops // printf("num_teams(512) collapse(2)\n"); success = 0; double * S = malloc(N*N*sizeof(double)); double * T = malloc(N*N*sizeof(double)); double * U = malloc(N*N*sizeof(double)); for (int i = 0 ; i < N ; i++) for (int j = 0 ; j < N ; j++) { S[i*N+j] = 0.0; T[i*N+j] = 1.0; U[i*N+j] = 2.0; } for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target map(tofrom:S[:N*N]), map(to:T[:N*N],U[:N*N]) #pragma omp teams distribute num_teams(512) collapse(2) for (int i = 0 ; i < N ; i++) for (int j = 0 ; j < N ; j++) S[i*N+j] += T[i*N+j] + U[i*N+j]; // += 3 at each t } for (int i = 0 ; i < N ; i++) for (int j = 0 ; j < N ; j++) if (S[i*N+j] != TRIALS*3.0) { printf("Error at (%d,%d), h = %lf, d = %lf\n", i, j, (double) TRIALS*3.0, S[i*N+j]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: 3 loops // printf("num_teams(512) collapse(3)\n"); success = 0; int M = N/8; double * V = malloc(M*M*M*sizeof(double)); double * Z = malloc(M*M*M*sizeof(double)); for (int i = 0 ; i < M ; i++) for (int j = 0 ; j < M ; j++) for (int k = 0 ; k < M ; k++) { V[i*M*M+j*M+k] = 2.0; Z[i*M*M+j*M+k] = 3.0; } for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target map(tofrom:V[:M*M*M]), map(to:Z[:M*M*M]) #pragma omp teams distribute num_teams(512) collapse(3) for (int i = 0 ; i < M ; i++) for (int j = 0 ; j < M ; j++) for (int k = 0 ; k < M ; k++) V[i*M*M+j*M+k] += Z[i*M*M+j*M+k]; // += 3 at each t } for (int i = 0 
; i < M ; i++) for (int j = 0 ; j < M ; j++) for (int k = 0 ; k < M ; k++) if (V[i*M*M+j*M+k] != 2.0+TRIALS*3.0) { printf("Error at (%d,%d), h = %lf, d = %lf\n", i, j, (double) TRIALS*3.0, V[i*M*M+j*M+k]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); return 0; }
Clustering.h
// // Copyright (C) 2015-2020 Yahoo Japan Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #pragma once #include "NGT/Index.h" using namespace std; #if defined(NGT_AVX_DISABLED) #define NGT_CLUSTER_NO_AVX #else #if defined(__AVX2__) #define NGT_CLUSTER_AVX2 #else #define NGT_CLUSTER_NO_AVX #endif #endif #if defined(NGT_CLUSTER_NO_AVX) // #warning "*** SIMD is *NOT* available! ***" #else #include <immintrin.h> #endif #include <omp.h> #include <random> namespace NGT { class Clustering { public: enum InitializationMode { InitializationModeHead = 0, InitializationModeRandom = 1, InitializationModeKmeansPlusPlus = 2 }; enum ClusteringType { ClusteringTypeKmeansWithNGT = 0, ClusteringTypeKmeansWithoutNGT = 1, ClusteringTypeKmeansWithIteration = 2, ClusteringTypeKmeansWithNGTForCentroids = 3 }; class Entry { public: Entry() : vectorID(0), centroidID(0), distance(0.0) { } Entry(size_t vid, size_t cid, double d) : vectorID(vid), centroidID(cid), distance(d) { } bool operator<(const Entry& e) const { return distance > e.distance; } uint32_t vectorID; uint32_t centroidID; double distance; }; class DescendingEntry { public: DescendingEntry(size_t vid, double d) : vectorID(vid), distance(d) { } bool operator<(const DescendingEntry& e) const { return distance < e.distance; } size_t vectorID; double distance; }; class Cluster { public: Cluster(std::vector<float>& c) : centroid(c), radius(0.0) { } Cluster(const Cluster& c) { *this = c; } Cluster& operator=(const Cluster& 
c) { members = c.members; centroid = c.centroid; radius = c.radius; return *this; } std::vector<Entry> members; std::vector<float> centroid; double radius; }; Clustering(InitializationMode im = InitializationModeHead, ClusteringType ct = ClusteringTypeKmeansWithNGT, size_t mi = 100) : clusteringType(ct), initializationMode(im), maximumIteration(mi) { initialize(); } void initialize() { epsilonFrom = 0.12; epsilonTo = epsilonFrom; epsilonStep = 0.04; resultSizeCoefficient = 5; } static void convert(std::vector<std::string>& strings, std::vector<float>& vector) { vector.clear(); for (auto it = strings.begin(); it != strings.end(); ++it) { vector.push_back(stod(*it)); } } static void extractVector(const std::string& str, std::vector<float>& vec) { std::vector<std::string> tokens; NGT::Common::tokenize(str, tokens, " \t"); convert(tokens, vec); } static void loadVectors(const std::string& file, std::vector<std::vector<float> >& vectors) { std::ifstream is(file); if (!is) { throw std::runtime_error("loadVectors::Cannot open " + file); } std::string line; while (getline(is, line)) { std::vector<float> v; extractVector(line, v); vectors.push_back(v); } } static void saveVectors(const std::string& file, std::vector<std::vector<float> >& vectors) { std::ofstream os(file); for (auto vit = vectors.begin(); vit != vectors.end(); ++vit) { std::vector<float>& v = *vit; for (auto it = v.begin(); it != v.end(); ++it) { os << std::setprecision(9) << (*it); if (it + 1 != v.end()) { os << "\t"; } } os << std::endl; } } static void saveVector(const std::string& file, std::vector<size_t>& vectors) { std::ofstream os(file); for (auto vit = vectors.begin(); vit != vectors.end(); ++vit) { os << *vit << std::endl; } } static void loadClusters(const std::string& file, std::vector<Cluster>& clusters, size_t numberOfClusters = 0) { std::ifstream is(file); if (!is) { throw std::runtime_error("loadClusters::Cannot open " + file); } std::string line; while (getline(is, line)) { 
std::vector<float> v; extractVector(line, v); clusters.push_back(v); if ((numberOfClusters != 0) && (clusters.size() >= numberOfClusters)) { break; } } if ((numberOfClusters != 0) && (clusters.size() < numberOfClusters)) { std::cerr << "initial cluster data are not enough. " << clusters.size() << ":" << numberOfClusters << std::endl; exit(1); } } #if !defined(NGT_CLUSTER_NO_AVX) static double sumOfSquares(float* a, float* b, size_t size) { __m256 sum = _mm256_setzero_ps(); float* last = a + size; float* lastgroup = last - 7; while (a < lastgroup) { __m256 v = _mm256_sub_ps(_mm256_loadu_ps(a), _mm256_loadu_ps(b)); sum = _mm256_add_ps(sum, _mm256_mul_ps(v, v)); a += 8; b += 8; } __attribute__((aligned(32))) float f[8]; _mm256_store_ps(f, sum); double s = f[0] + f[1] + f[2] + f[3] + f[4] + f[5] + f[6] + f[7]; while (a < last) { double d = *a++ - *b++; s += d * d; } return s; } #else // !defined(NGT_AVX_DISABLED) && defined(__AVX__) static double sumOfSquares(float* a, float* b, size_t size) { double csum = 0.0; float* x = a; float* y = b; for (size_t i = 0; i < size; i++) { double d = (double)*x++ - (double)*y++; csum += d * d; } return csum; } #endif // !defined(NGT_AVX_DISABLED) && defined(__AVX__) static double distanceL2(std::vector<float>& vector1, std::vector<float>& vector2) { return sqrt(sumOfSquares(&vector1[0], &vector2[0], vector1.size())); } static double distanceL2(std::vector<std::vector<float> >& vector1, std::vector<std::vector<float> >& vector2) { assert(vector1.size() == vector2.size()); double distance = 0.0; for (size_t i = 0; i < vector1.size(); i++) { distance += distanceL2(vector1[i], vector2[i]); } distance /= (double)vector1.size(); return distance; } static double meanSumOfSquares(std::vector<float>& vector1, std::vector<float>& vector2) { return sumOfSquares(&vector1[0], &vector2[0], vector1.size()) / (double)vector1.size(); } static void subtract(std::vector<float>& a, std::vector<float>& b) { assert(a.size() == b.size()); auto bit = 
b.begin(); for (auto ait = a.begin(); ait != a.end(); ++ait, ++bit) { *ait = *ait - *bit; } } static void getInitialCentroidsFromHead(std::vector<std::vector<float> >& vectors, std::vector<Cluster>& clusters, size_t size) { size = size > vectors.size() ? vectors.size() : size; clusters.clear(); for (size_t i = 0; i < size; i++) { clusters.push_back(Cluster(vectors[i])); } } static void getInitialCentroidsRandomly(std::vector<std::vector<float> >& vectors, std::vector<Cluster>& clusters, size_t size, size_t seed) { clusters.clear(); std::random_device rnd; if (seed == 0) { seed = rnd(); } std::mt19937 mt(seed); for (size_t i = 0; i < size; i++) { size_t idx = mt() * vectors.size() / mt.max(); if (idx >= size) { i--; continue; } clusters.push_back(Cluster(vectors[idx])); } assert(clusters.size() == size); } static void getInitialCentroidsKmeansPlusPlus(std::vector<std::vector<float> >& vectors, std::vector<Cluster>& clusters, size_t size) { size = size > vectors.size() ? vectors.size() : size; clusters.clear(); std::random_device rnd; std::mt19937 mt(rnd()); size_t idx = (long long)mt() * (long long)vectors.size() / (long long)mt.max(); clusters.push_back(Cluster(vectors[idx])); NGT::Timer timer; for (size_t k = 1; k < size; k++) { double sum = 0; std::priority_queue<DescendingEntry> sortedObjects; // get d^2 and sort #pragma omp parallel for for (size_t vi = 0; vi < vectors.size(); vi++) { auto vit = vectors.begin() + vi; double mind = DBL_MAX; for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) { double d = distanceL2(*vit, (*cit).centroid); d *= d; if (d < mind) { mind = d; } } #pragma omp critical { sortedObjects.push(DescendingEntry(distance(vectors.begin(), vit), mind)); sum += mind; } } double l = (double)mt() / (double)mt.max() * sum; while (!sortedObjects.empty()) { sum -= sortedObjects.top().distance; if (l >= sum) { clusters.push_back(Cluster(vectors[sortedObjects.top().vectorID])); break; } sortedObjects.pop(); } } } static void 
assign(std::vector<std::vector<float> >& vectors, std::vector<Cluster>& clusters, size_t clusterSize = std::numeric_limits<size_t>::max()) { // compute distances to the nearest clusters, and construct heap by the distances. NGT::Timer timer; timer.start(); std::vector<Entry> sortedObjects(vectors.size()); #pragma omp parallel for for (size_t vi = 0; vi < vectors.size(); vi++) { auto vit = vectors.begin() + vi; { double mind = DBL_MAX; size_t mincidx = -1; for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) { double d = distanceL2(*vit, (*cit).centroid); if (d < mind) { mind = d; mincidx = distance(clusters.begin(), cit); } } sortedObjects[vi] = Entry(vi, mincidx, mind); } } std::sort(sortedObjects.begin(), sortedObjects.end()); // clear for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) { (*cit).members.clear(); } // distribute objects to the nearest clusters in the same size constraint. for (auto soi = sortedObjects.rbegin(); soi != sortedObjects.rend();) { Entry& entry = *soi; if (entry.centroidID >= clusters.size()) { std::cerr << "Something wrong. 
" << entry.centroidID << ":" << clusters.size() << std::endl; soi++; continue; } if (clusters[entry.centroidID].members.size() < clusterSize) { clusters[entry.centroidID].members.push_back(entry); soi++; } else { double mind = DBL_MAX; size_t mincidx = -1; for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) { if ((*cit).members.size() >= clusterSize) { continue; } double d = distanceL2(vectors[entry.vectorID], (*cit).centroid); if (d < mind) { mind = d; mincidx = distance(clusters.begin(), cit); } } entry = Entry(entry.vectorID, mincidx, mind); int pt = distance(sortedObjects.rbegin(), soi); std::sort(sortedObjects.begin(), soi.base()); soi = sortedObjects.rbegin() + pt; assert(pt == distance(sortedObjects.rbegin(), soi)); } } moveFartherObjectsToEmptyClusters(clusters); } static void moveFartherObjectsToEmptyClusters(std::vector<Cluster>& clusters) { size_t emptyClusterCount = 0; for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) { if ((*cit).members.size() == 0) { emptyClusterCount++; double max = 0.0; auto maxit = clusters.begin(); for (auto scit = clusters.begin(); scit != clusters.end(); ++scit) { if ((*scit).members.size() >= 2 && (*scit).members.back().distance > max) { maxit = scit; max = (*scit).members.back().distance; } } (*cit).members.push_back((*maxit).members.back()); (*cit).members.back().centroidID = distance(clusters.begin(), cit); (*maxit).members.pop_back(); } } emptyClusterCount = 0; for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) { if ((*cit).members.size() == 0) { emptyClusterCount++; } } } static void assignWithNGT(NGT::Index& index, std::vector<std::vector<float> >& vectors, std::vector<Cluster>& clusters, float& radius, size_t& resultSize, float epsilon = 0.12, size_t notRetrievedObjectCount = 0) { size_t dataSize = vectors.size(); assert(index.getObjectRepositorySize() - 1 == vectors.size()); vector<vector<Entry> > results(clusters.size()); #pragma omp parallel for for (size_t ci = 0; ci < 
clusters.size(); ci++) { auto cit = clusters.begin() + ci; NGT::ObjectDistances objects; // result set NGT::Object* query = 0; query = index.allocateObject((*cit).centroid); // set search prameters. NGT::SearchContainer sc(*query); // search parametera container. sc.setResults(&objects); // set the result set. sc.setEpsilon(epsilon); // set exploration coefficient. if (radius > 0.0) { sc.setRadius(radius); sc.setSize(dataSize / 2); } else { sc.setSize(resultSize); // the number of resultant objects. } index.search(sc); results[ci].reserve(objects.size()); for (size_t idx = 0; idx < objects.size(); idx++) { size_t oidx = objects[idx].id - 1; results[ci].push_back(Entry(oidx, ci, objects[idx].distance)); } index.deleteObject(query); } size_t resultCount = 0; for (auto ri = results.begin(); ri != results.end(); ++ri) { resultCount += (*ri).size(); } vector<Entry> sortedResults; sortedResults.reserve(resultCount); for (auto ri = results.begin(); ri != results.end(); ++ri) { auto end = (*ri).begin(); for (; end != (*ri).end(); ++end) { } std::copy((*ri).begin(), end, std::back_inserter(sortedResults)); } vector<bool> processedObjects(dataSize, false); for (auto i = sortedResults.begin(); i != sortedResults.end(); ++i) { processedObjects[(*i).vectorID] = true; } notRetrievedObjectCount = 0; vector<uint32_t> notRetrievedObjectIDs; for (size_t idx = 0; idx < dataSize; idx++) { if (!processedObjects[idx]) { notRetrievedObjectCount++; notRetrievedObjectIDs.push_back(idx); } } sort(sortedResults.begin(), sortedResults.end()); for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) { (*cit).members.clear(); } for (auto i = sortedResults.rbegin(); i != sortedResults.rend(); ++i) { size_t objectID = (*i).vectorID; size_t clusterID = (*i).centroidID; if (processedObjects[objectID]) { processedObjects[objectID] = false; clusters[clusterID].members.push_back(*i); clusters[clusterID].members.back().centroidID = clusterID; radius = (*i).distance; } } vector<Entry> 
notRetrievedObjects(notRetrievedObjectIDs.size()); #pragma omp parallel for for (size_t vi = 0; vi < notRetrievedObjectIDs.size(); vi++) { auto vit = notRetrievedObjectIDs.begin() + vi; { double mind = DBL_MAX; size_t mincidx = -1; for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) { double d = distanceL2(vectors[*vit], (*cit).centroid); if (d < mind) { mind = d; mincidx = distance(clusters.begin(), cit); } } notRetrievedObjects[vi] = Entry(*vit, mincidx, mind); // Entry(vectorID, centroidID, distance) } } sort(notRetrievedObjects.begin(), notRetrievedObjects.end()); for (auto nroit = notRetrievedObjects.begin(); nroit != notRetrievedObjects.end(); ++nroit) { clusters[(*nroit).centroidID].members.push_back(*nroit); } moveFartherObjectsToEmptyClusters(clusters); } static double calculateCentroid(std::vector<std::vector<float> >& vectors, std::vector<Cluster>& clusters) { double distance = 0; size_t memberCount = 0; for (auto it = clusters.begin(); it != clusters.end(); ++it) { memberCount += (*it).members.size(); if ((*it).members.size() != 0) { std::vector<float> mean(vectors[0].size(), 0.0); for (auto memit = (*it).members.begin(); memit != (*it).members.end(); ++memit) { auto mit = mean.begin(); auto& v = vectors[(*memit).vectorID]; for (auto vit = v.begin(); vit != v.end(); ++vit, ++mit) { *mit += *vit; } } for (auto mit = mean.begin(); mit != mean.end(); ++mit) { *mit /= (*it).members.size(); } distance += distanceL2((*it).centroid, mean); (*it).centroid = mean; } else { cerr << "Clustering: Fatal Error. No member!" 
<< endl; abort(); } } return distance; } static void saveClusters(const std::string& file, std::vector<Cluster>& clusters) { std::ofstream os(file); for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) { std::vector<float>& v = (*cit).centroid; for (auto it = v.begin(); it != v.end(); ++it) { os << std::setprecision(9) << (*it); if (it + 1 != v.end()) { os << "\t"; } } os << std::endl; } } double kmeansWithoutNGT(std::vector<std::vector<float> >& vectors, size_t numberOfClusters, std::vector<Cluster>& clusters) { size_t clusterSize = std::numeric_limits<size_t>::max(); if (clusterSizeConstraint) { clusterSize = ceil((double)vectors.size() / (double)numberOfClusters); } double diff = 0; for (size_t i = 0; i < maximumIteration; i++) { std::cerr << "iteration=" << i << std::endl; assign(vectors, clusters, clusterSize); // centroid is recomputed. // diff is distance between the current centroids and the previous centroids. diff = calculateCentroid(vectors, clusters); if (diff == 0) { break; } } return diff == 0; } double kmeansWithNGT(NGT::Index& index, std::vector<std::vector<float> >& vectors, size_t numberOfClusters, std::vector<Cluster>& clusters, float epsilon) { diffHistory.clear(); NGT::Timer timer; timer.start(); float radius; double diff = 0.0; size_t resultSize; resultSize = resultSizeCoefficient * vectors.size() / clusters.size(); for (size_t i = 0; i < maximumIteration; i++) { size_t notRetrievedObjectCount = 0; radius = -1.0; assignWithNGT(index, vectors, clusters, radius, resultSize, epsilon, notRetrievedObjectCount); // centroid is recomputed. // diff is distance between the current centroids and the previous centroids. 
std::vector<Cluster> prevClusters = clusters; diff = calculateCentroid(vectors, clusters); timer.stop(); std::cerr << "iteration=" << i << " time=" << timer << " diff=" << diff << std::endl; timer.start(); diffHistory.push_back(diff); if (diff == 0) { break; } } return diff; } double kmeansWithNGT(std::vector<std::vector<float> >& vectors, size_t numberOfClusters, std::vector<Cluster>& clusters) { pid_t pid = getpid(); std::stringstream str; str << "cluster-ngt." << pid; string database = str.str(); string dataFile; size_t dataSize = 0; size_t dim = clusters.front().centroid.size(); NGT::Property property; property.dimension = dim; property.graphType = NGT::Property::GraphType::GraphTypeANNG; property.objectType = NGT::Index::Property::ObjectType::Float; property.distanceType = NGT::Index::Property::DistanceType::DistanceTypeL2; NGT::Index::createGraphAndTree(database, property, dataFile, dataSize); float* data = new float[vectors.size() * dim]; float* ptr = data; dataSize = vectors.size(); for (auto vi = vectors.begin(); vi != vectors.end(); ++vi) { memcpy(ptr, &((*vi)[0]), dim * sizeof(float)); ptr += dim; } size_t threadSize = 20; NGT::Index::append(database, data, dataSize, threadSize); delete[] data; NGT::Index index(database); return kmeansWithNGT(index, vectors, numberOfClusters, clusters, epsilonFrom); } double kmeansWithNGT(NGT::Index& index, size_t numberOfClusters, std::vector<Cluster>& clusters) { NGT::GraphIndex& graph = static_cast<NGT::GraphIndex&>(index.getIndex()); NGT::ObjectSpace& os = graph.getObjectSpace(); size_t size = os.getRepository().size(); std::vector<std::vector<float> > vectors(size - 1); for (size_t idx = 1; idx < size; idx++) { try { os.getObject(idx, vectors[idx - 1]); } catch (...) 
{ cerr << "Cannot get object " << idx << endl; } } cerr << "# of data for clustering=" << vectors.size() << endl; double diff = DBL_MAX; clusters.clear(); setupInitialClusters(vectors, numberOfClusters, clusters); for (float epsilon = epsilonFrom; epsilon <= epsilonTo; epsilon += epsilonStep) { cerr << "epsilon=" << epsilon << endl; diff = kmeansWithNGT(index, vectors, numberOfClusters, clusters, epsilon); if (diff == 0.0) { return diff; } } return diff; } double kmeansWithNGT(NGT::Index& index, size_t numberOfClusters, NGT::Index& outIndex) { std::vector<Cluster> clusters; double diff = kmeansWithNGT(index, numberOfClusters, clusters); for (auto i = clusters.begin(); i != clusters.end(); ++i) { outIndex.insert((*i).centroid); } outIndex.createIndex(16); return diff; } double kmeansWithNGT(NGT::Index& index, size_t numberOfClusters) { NGT::Property prop; index.getProperty(prop); string path = index.getPath(); index.save(); index.close(); string outIndexName = path; string inIndexName = path + ".tmp"; std::rename(outIndexName.c_str(), inIndexName.c_str()); NGT::Index::createGraphAndTree(outIndexName, prop); index.open(outIndexName); NGT::Index inIndex(inIndexName); double diff = kmeansWithNGT(inIndex, numberOfClusters, index); inIndex.close(); NGT::Index::destroy(inIndexName); return diff; } double kmeansWithNGT(string& indexName, size_t numberOfClusters) { NGT::Index inIndex(indexName); double diff = kmeansWithNGT(inIndex, numberOfClusters); inIndex.save(); inIndex.close(); return diff; } static double calculateMSE(std::vector<std::vector<float> >& vectors, std::vector<Cluster>& clusters) { double mse = 0.0; size_t count = 0; for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) { count += (*cit).members.size(); for (auto mit = (*cit).members.begin(); mit != (*cit).members.end(); ++mit) { mse += meanSumOfSquares((*cit).centroid, vectors[(*mit).vectorID]); } } assert(vectors.size() == count); return mse / (double)vectors.size(); } static double 
calculateML2(std::vector<std::vector<float> >& vectors, std::vector<Cluster>& clusters) { double d = 0.0; size_t count = 0; for (auto cit = clusters.begin(); cit != clusters.end(); ++cit) { count += (*cit).members.size(); double localD = 0.0; for (auto mit = (*cit).members.begin(); mit != (*cit).members.end(); ++mit) { double distance = distanceL2((*cit).centroid, vectors[(*mit).vectorID]); d += distance; localD += distance; } } if (vectors.size() != count) { std::cerr << "Warning! vectors.size() != count" << std::endl; } return d / (double)vectors.size(); } static double calculateML2FromSpecifiedCentroids(std::vector<std::vector<float> >& vectors, std::vector<Cluster>& clusters, std::vector<size_t>& centroidIds) { double d = 0.0; size_t count = 0; for (auto it = centroidIds.begin(); it != centroidIds.end(); ++it) { Cluster& cluster = clusters[(*it)]; count += cluster.members.size(); for (auto mit = cluster.members.begin(); mit != cluster.members.end(); ++mit) { d += distanceL2(cluster.centroid, vectors[(*mit).vectorID]); } } return d / (double)vectors.size(); } void setupInitialClusters(std::vector<std::vector<float> >& vectors, size_t numberOfClusters, std::vector<Cluster>& clusters) { if (clusters.empty()) { switch (initializationMode) { case InitializationModeHead: { getInitialCentroidsFromHead(vectors, clusters, numberOfClusters); break; } case InitializationModeRandom: { getInitialCentroidsRandomly(vectors, clusters, numberOfClusters, 0); break; } case InitializationModeKmeansPlusPlus: { getInitialCentroidsKmeansPlusPlus(vectors, clusters, numberOfClusters); break; } default: std::cerr << "proper initMode is not specified." 
<< std::endl; exit(1); } } } bool kmeans(std::vector<std::vector<float> >& vectors, size_t numberOfClusters, std::vector<Cluster>& clusters) { setupInitialClusters(vectors, numberOfClusters, clusters); switch (clusteringType) { case ClusteringTypeKmeansWithoutNGT: return kmeansWithoutNGT(vectors, numberOfClusters, clusters); break; case ClusteringTypeKmeansWithNGT: return kmeansWithNGT(vectors, numberOfClusters, clusters); break; default: cerr << "kmeans::fatal error!. invalid clustering type. " << clusteringType << endl; abort(); break; } } static void evaluate(std::vector<std::vector<float> >& vectors, std::vector<Cluster>& clusters, char mode, std::vector<size_t> centroidIds = std::vector<size_t>()) { size_t clusterSize = std::numeric_limits<size_t>::max(); assign(vectors, clusters, clusterSize); std::cout << "The number of vectors=" << vectors.size() << std::endl; std::cout << "The number of centroids=" << clusters.size() << std::endl; if (centroidIds.size() == 0) { switch (mode) { case 'e': std::cout << "MSE=" << calculateMSE(vectors, clusters) << std::endl; break; case '2': default: std::cout << "ML2=" << calculateML2(vectors, clusters) << std::endl; break; } } else { switch (mode) { case 'e': break; case '2': default: std::cout << "ML2=" << calculateML2FromSpecifiedCentroids(vectors, clusters, centroidIds) << std::endl; break; } } } ClusteringType clusteringType; InitializationMode initializationMode; size_t numberOfClusters; bool clusterSizeConstraint; size_t maximumIteration; float epsilonFrom; float epsilonTo; float epsilonStep; size_t resultSizeCoefficient; vector<double> diffHistory; }; } // namespace NGT
HDAA_fmt_plug.c
/* HTTP Digest access authentication patch for john * * Written by Romain Raboin. OMP and intrinsics support by magnum * * This software is Copyright (c) 2008 Romain Raboin - romain.raboin at * gmail.com, and Copyright (c) 2012 magnum and it is hereby released to * the general public under the following terms: Redistribution and * use in source and binary forms, with or without modification, are * permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_HDAA; #elif FMT_REGISTERS_H john_register_one(&fmt_HDAA); #else #include <string.h> #ifdef __MMX__ #include <mmintrin.h> #endif #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "md5.h" #include "stdint.h" #include "sse-intrinsics.h" #define ALGORITHM_NAME "MD5 " MD5_ALGORITHM_NAME #if defined(_OPENMP) #include <omp.h> #endif #include "memdbg.h" #define FORMAT_LABEL "hdaa" #define FORMAT_NAME "HTTP Digest access authentication" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 32 #define CIPHERTEXT_LENGTH 32 #define BINARY_SIZE 16 #define BINARY_ALIGN 4 #define SALT_SIZE sizeof(reqinfo_t) #define SALT_ALIGN 4 #if defined(_OPENMP) static unsigned int omp_t = 1; #ifdef MMX_COEF #define OMP_SCALE 256 #else #define OMP_SCALE 64 #endif #endif #ifdef MMX_COEF #define NBKEYS (MMX_COEF * MD5_SSE_PARA) #define MIN_KEYS_PER_CRYPT NBKEYS #define MAX_KEYS_PER_CRYPT NBKEYS #define GETPOS(i, index) ( (index&(MMX_COEF-1))*4 + ((i)&60)*MMX_COEF + ((i)&3) + (index>>(MMX_COEF>>1))*64*MMX_COEF ) #define GETOUTPOS(i, index) ( (index&(MMX_COEF-1))*4 + ((i)&0x1c)*MMX_COEF + ((i)&3) + (index>>(MMX_COEF>>1))*16*MMX_COEF ) #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #define SEPARATOR '$' #define MAGIC "$response$" #define SIZE_TAB 12 // This is 8 x 64 bytes, so in MMX/SSE2 we support up to 9 limbs of MD5 #define HTMP 512 typedef struct { char **request; size_t h1tmplen; size_t h3tmplen; char h1tmp[HTMP]; char h3tmp[HTMP]; } reqinfo_t; /* digest 
authentication scheme : h1 = md5(user:realm:password) h2 = md5(method:digestURI) response = h3 = md5(h1:nonce:nonceCount:ClientNonce:qop:h2) */ /* request information */ enum e_req { R_RESPONSE, R_USER, R_REALM, R_METHOD, R_URI, R_NONCE, R_NONCECOUNT, R_CLIENTNONCE, R_QOP }; /* response:user:realm:method:uri:nonce:nonceCount:ClientNonce:qop */ static struct fmt_tests tests[] = { {"$response$679066476e67b5c7c4e88f04be567f8b$user$myrealm$GET$/$8c12bd8f728afe56d45a0ce846b70e5a$00000001$4b61913cec32e2c9$auth", "nocode"}, {"$response$faa6cb7d676e5b7c17fcbf966436aa0c$moi$myrealm$GET$/$af32592775d27b1cd06356b3a0db9ddf$00000001$8e1d49754a25aea7$auth", "kikou"}, {NULL} }; /* used by set_key */ static char (*saved_plain)[PLAINTEXT_LENGTH + 1]; #ifdef MMX_COEF #define LIMBS 9 static unsigned char *saved_key[LIMBS]; static unsigned int *interm_key; static unsigned int *crypt_key; #else static int (*saved_len); static unsigned char (*crypt_key)[BINARY_SIZE]; #endif /* Store information about the request ()*/ static reqinfo_t *rinfo = NULL; static void init(struct fmt_main *self) { #ifdef MMX_COEF int i; #endif #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif #ifdef MMX_COEF for (i = 0; i < LIMBS; i++) saved_key[i] = mem_calloc_tiny(64 * self->params.max_keys_per_crypt, MEM_ALIGN_SIMD); interm_key = mem_calloc_tiny(16 * self->params.max_keys_per_crypt, MEM_ALIGN_SIMD); crypt_key = mem_calloc_tiny(16 * self->params.max_keys_per_crypt, MEM_ALIGN_SIMD); #else crypt_key = mem_calloc_tiny(sizeof(*crypt_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); saved_len = mem_calloc_tiny(sizeof(*saved_len) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); #endif saved_plain = mem_calloc_tiny(sizeof(*saved_plain) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); } static int valid(char *ciphertext, struct fmt_main *self) { int nb = 0; int i; if (strncmp(ciphertext, 
MAGIC, sizeof(MAGIC) - 1) != 0) return 0; for (i = 0; ciphertext[i] != 0; i++) { if (ciphertext[i] == SEPARATOR) { nb++; } } if (nb == 10) return 1; return 0; } static void set_salt(void *salt) { rinfo = salt; } static void set_key(char *key, int index) { strcpy(saved_plain[index], key); #ifndef MMX_COEF saved_len[index] = -1; #endif } static char *get_key(int index) { return saved_plain[index]; } static int cmp_all(void *binary, int count) { #ifdef MMX_COEF unsigned int x,y=0; #ifdef _OPENMP for(; y < MD5_SSE_PARA * omp_t; y++) #else for(; y < MD5_SSE_PARA; y++) #endif for(x = 0; x < MMX_COEF; x++) { if( ((ARCH_WORD_32*)binary)[0] == ((ARCH_WORD_32*)crypt_key)[y*MMX_COEF*4+x] ) return 1; } return 0; #else int index; for (index = 0; index < count; index++) if (!(memcmp(binary, crypt_key[index], BINARY_SIZE))) return 1; return 0; #endif } static int cmp_one(void *binary, int index) { #ifdef MMX_COEF unsigned int i,x,y; x = index&(MMX_COEF-1); y = index/MMX_COEF; for(i=0;i<(BINARY_SIZE/4);i++) if ( ((ARCH_WORD_32*)binary)[i] != ((ARCH_WORD_32*)crypt_key)[y*MMX_COEF*4+i*MMX_COEF+x] ) return 0; return 1; #else return !(memcmp(binary, crypt_key[index], BINARY_SIZE)); #endif } static int cmp_exact(char *source, int count) { return 1; } /* convert hash from binary to ascii */ #ifdef MMX_COEF // This code should be rewritten in intrinsics, reading from // MMX or SSE2 output buffers and writing to MMX/SSE2 input buffers. 
static inline void sse_bin2ascii(unsigned char *conv, unsigned char *src) { unsigned int index; for (index = 0; index < NBKEYS; index++) { unsigned int i, j = 0; for (i = 0; i < BINARY_SIZE; i += 2) { unsigned int t; t = (src[GETOUTPOS((i + 1), index)] & 0x0f); t <<= 12; t |= (src[GETOUTPOS((i + 1), index)] & 0xf0); t <<= 4; t |= (src[GETOUTPOS(i, index)] & 0x0f); t <<= 8; t |= ((src[GETOUTPOS(i, index)] & 0xf0) >> 4); t += 0x06060606; t += ((((t >> 4) & 0x01010101) * 0x27) + 0x2a2a2a2a); *(unsigned int*)&conv[GETPOS(j, index)] = t; j+=4; } } } #endif /* MMX_COEF */ #ifdef __MMX__ static inline void bin2ascii(__m64 *conv, __m64 *src) { unsigned int i = 0; while (i != 4) { __m64 l; __m64 r; __m64 t; __m64 u; __m64 v; /* 32 bits to 64 bits */ t = _mm_set1_pi32(0x0f0f0f0f); /* Bit-wise AND the 64-bit values in M1 and M2. */ u = _mm_and_si64(_mm_srli_si64(src[(i / 2)], 4), t); v = _mm_and_si64(src[(i / 2)], t); /* interleaving */ l = _mm_unpacklo_pi8(u, v); r = _mm_unpackhi_pi8(u, v); t = _mm_set1_pi32(0x06060606); l = _mm_add_pi32(l, t); r = _mm_add_pi32(r, t); t = _mm_set1_pi32(0x01010101); /* u = (l << 4) & t */ u = _mm_and_si64(_mm_srli_si64(l, 4), t); /* v = (r << 4) & t */ v = _mm_and_si64(_mm_srli_si64(r, 4), t); t = _mm_set1_pi32(0x00270027); /* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce the low 16 bits of the results. 
*/ u = _mm_mullo_pi16(u, t); v = _mm_mullo_pi16(v, t); t = _mm_set1_pi32(0x2a2a2a2a); u = _mm_add_pi32(u, t); v = _mm_add_pi32(v, t); conv[(i++)] = _mm_add_pi32(l, u); conv[(i++)] = _mm_add_pi32(r, v); } } #else static inline void bin2ascii(uint32_t *conv, uint32_t *source) { unsigned char *src = (unsigned char*)source; unsigned int i; unsigned int j = 0; uint32_t t = 0; for (i = 0; i < BINARY_SIZE; i += 2) { #if (ARCH_LITTLE_ENDIAN == 0) t = (src[i] & 0xf0); t *= 0x10; t += (src[i] & 0x0f); t *= 0x1000; t += (src[(i + 1)] & 0xf0); t *= 0x10; t += (src[(i + 1)] & 0x0f); #else t = (src[(i + 1)] & 0x0f); t *= 0x1000; t += (src[(i + 1)] & 0xf0); t *= 0x10; t += (src[i] & 0x0f); t *= 0x100; t += ((src[i] & 0xf0) >> 4); #endif t += 0x06060606; t += ((((t >> 4) & 0x01010101) * 0x27) + 0x2a2a2a2a); conv[(j++)] = t; } } #endif /* MMX */ #if MMX_COEF static inline void crypt_done(unsigned const int *source, unsigned int *dest, int index) { unsigned int i; unsigned const int *s = &source[(index&(MMX_COEF-1)) + (index>>(MMX_COEF>>1))*4*MMX_COEF]; unsigned int *d = &dest[(index&(MMX_COEF-1)) + (index>>(MMX_COEF>>1))*4*MMX_COEF]; for (i = 0; i < BINARY_SIZE / 4; i++) { *d = *s; s += MMX_COEF; d += MMX_COEF; } } #endif static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; #if MMX_COEF #if defined(_OPENMP) #define ti (thread*NBKEYS+index) int thread; #pragma omp parallel for for (thread = 0; thread < (count+NBKEYS-1)/NBKEYS; thread++) #else #define thread 0 #define ti index #endif { static unsigned int crypt_len[NBKEYS]; unsigned int index, i, shortest, longest; for (index = 0; index < NBKEYS; index++) { int len; char temp; const char *key; key = rinfo->h1tmp; for (len = 0; len < rinfo->h1tmplen; len += 4, key += 4) *(ARCH_WORD_32*)&saved_key[len>>6][GETPOS(len, ti)] = *(ARCH_WORD_32*)key; len = rinfo->h1tmplen; key = (char*)&saved_plain[ti]; while((temp = *key++)) { saved_key[len>>6][GETPOS(len, ti)] = temp; len++; } saved_key[len>>6][GETPOS(len, ti)] = 
0x80; // Clean rest of this buffer i = len; while (++i & 3) saved_key[i>>6][GETPOS(i, ti)] = 0; for (; i < (((len+8)>>6)+1)*64; i += 4) *(ARCH_WORD_32*)&saved_key[i>>6][GETPOS(i, ti)] = 0; ((unsigned int *)saved_key[(len+8)>>6])[14*MMX_COEF + (ti&3) + (ti>>2)*16*MMX_COEF] = len << 3; } SSEmd5body(&saved_key[0][thread*64*NBKEYS], &crypt_key[thread*4*NBKEYS], NULL, SSEi_MIXED_IN); sse_bin2ascii((unsigned char*)&saved_key[0][thread*64*NBKEYS], (unsigned char*)&crypt_key[thread*4*NBKEYS]); longest = 0; shortest = HTMP; for (index = 0; index < NBKEYS; index++) { const char *key; int i, len; len = CIPHERTEXT_LENGTH - 1; key = rinfo->h3tmp + CIPHERTEXT_LENGTH; // Copy a char at a time until aligned at destination while (++len & 3) saved_key[len>>6][GETPOS(len, ti)] = *key++; // ...then a word at a time. This is a good boost, we are copying over 100 bytes. for (;len < rinfo->h3tmplen; len += 4, key += 4) *(ARCH_WORD_32*)&saved_key[len>>6][GETPOS(len, ti)] = *(ARCH_WORD_32*)key; len = rinfo->h3tmplen; saved_key[len>>6][GETPOS(len, ti)] = 0x80; // Clean rest of this buffer i = len; while (++i & 3) saved_key[i>>6][GETPOS(i, ti)] = 0; //for (; i < (((len+8)>>6)+1)*64; i += 4) for (; i <= crypt_len[index]; i += 4) *(ARCH_WORD_32*)&saved_key[i>>6][GETPOS(i, ti)] = 0; ((unsigned int *)saved_key[(len+8)>>6])[14*MMX_COEF + (ti&3) + (ti>>2)*16*MMX_COEF] = len << 3; crypt_len[index] = len; if (len > longest) longest = len; if (len < shortest) shortest = len; } // First limb SSEmd5body(&saved_key[0][thread*64*NBKEYS], &interm_key[thread*4*NBKEYS], NULL, SSEi_MIXED_IN); // Copy any output that is done now if (shortest < 56) { if (longest < 56) memcpy(&crypt_key[thread*4*NBKEYS], &interm_key[thread*4*NBKEYS], 16*NBKEYS); else for (index = 0; index < NBKEYS; index++) if (crypt_len[index] < 56) crypt_done(interm_key, crypt_key, ti); } // Do the rest of the limbs for (i = 1; i < (((longest + 8) >> 6) + 1); i++) { SSEmd5body(&saved_key[i][thread*64*NBKEYS], &interm_key[thread*4*NBKEYS], 
&interm_key[thread*4*NBKEYS], SSEi_RELOAD|SSEi_MIXED_IN); // Copy any output that is done now if (shortest < i*64+56) { if (shortest > (i-1)*64+55 && longest < i*64+56) memcpy(&crypt_key[thread*4*NBKEYS], &interm_key[thread*4*NBKEYS], 16*NBKEYS); else for (index = 0; index < NBKEYS; index++) if (((crypt_len[index] + 8) >> 6) == i) crypt_done(interm_key, crypt_key, ti); } } } #undef thread #undef ti #else int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { MD5_CTX ctx; int len; #ifdef _OPENMP char h3tmp[HTMP]; char h1tmp[HTMP]; #else char *h3tmp; char *h1tmp; #endif size_t tmp; #ifdef __MMX__ __m64 h1[BINARY_SIZE / sizeof(__m64)]; __m64 conv[CIPHERTEXT_LENGTH / sizeof(__m64) + 1]; #else uint32_t h1[BINARY_SIZE / sizeof(uint32_t)]; uint32_t conv[(CIPHERTEXT_LENGTH / sizeof(uint32_t)) + 1]; #endif tmp = rinfo->h1tmplen; if ((len = saved_len[index]) < 0) len = saved_len[index] = strlen(saved_plain[index]); #ifdef _OPENMP memcpy(h1tmp, rinfo->h1tmp, tmp); memcpy(h3tmp + CIPHERTEXT_LENGTH, rinfo->h3tmp + CIPHERTEXT_LENGTH, rinfo->h3tmplen - CIPHERTEXT_LENGTH); #else h3tmp = rinfo->h3tmp; h1tmp = rinfo->h1tmp; #endif memcpy(&h1tmp[tmp], saved_plain[index], len); MD5_Init(&ctx); MD5_Update(&ctx, h1tmp, len + tmp); MD5_Final((unsigned char*)h1, &ctx); bin2ascii(conv, h1); memcpy(h3tmp, conv, CIPHERTEXT_LENGTH); MD5_Init(&ctx); MD5_Update(&ctx, h3tmp, rinfo->h3tmplen); MD5_Final(crypt_key[index], &ctx); } #endif return count; } static char *mystrndup(const char *s, size_t n) { size_t tmp; size_t size; char *ret; for (tmp = 0; s[tmp] != 0 && tmp <= n; tmp++); size = n; if (tmp < size) size = tmp; if ((ret = mem_alloc_tiny(sizeof(char) * size + 1, MEM_ALIGN_WORD)) == NULL) return NULL; memmove(ret, s, size); ret[size] = 0; return ret; } static size_t reqlen(char *str) { size_t len; for (len = 0; str[len] != 0 && str[len] != SEPARATOR; len++); return len; } static void *salt(char *ciphertext) { int nb; int i; char **request; 
char *str; reqinfo_t *r; #ifdef __MMX__ __m64 h2[BINARY_SIZE / sizeof(__m64)]; __m64 conv[CIPHERTEXT_LENGTH / sizeof(__m64) + 1]; #else unsigned int h2[BINARY_SIZE / sizeof(unsigned int)]; uint32_t conv[(CIPHERTEXT_LENGTH / sizeof(uint32_t)) + 1]; #endif MD5_CTX ctx; /* parse the password string */ request = mem_alloc_tiny(sizeof(char*) * SIZE_TAB, MEM_ALIGN_WORD); r = mem_calloc_tiny(sizeof(*r), MEM_ALIGN_WORD); for (nb = 0, i = 1; ciphertext[i] != 0; i++) { if (ciphertext[i] == SEPARATOR) { i++; request[nb] = mystrndup(&ciphertext[i], reqlen(&ciphertext[i])); nb++; } } /* calculate h2 (h2 = md5(method:digestURI))*/ str = mem_alloc(strlen(request[R_METHOD]) + strlen(request[R_URI]) + 2); sprintf(str, "%s:%s", request[R_METHOD], request[R_URI]); MD5_Init(&ctx); MD5_Update(&ctx, str, strlen(str)); MD5_Final((unsigned char*)h2, &ctx); memset(conv, 0, CIPHERTEXT_LENGTH + 1); bin2ascii(conv, h2); MEM_FREE(str); /* create a part of h1 (h1tmp = request:realm:)*/ snprintf(r->h1tmp, HTMP - PLAINTEXT_LENGTH, "%s:%s:", request[R_USER], request[R_REALM]); /* create a part of h3 (h3tmp = nonce:noncecount:clientnonce:qop:h2)*/ snprintf(&r->h3tmp[CIPHERTEXT_LENGTH], HTMP - CIPHERTEXT_LENGTH, ":%s:%s:%s:%s:%s", request[R_NONCE], request[R_NONCECOUNT], request[R_CLIENTNONCE], request[R_QOP], (char*)conv); r->request = request; r->h1tmplen = strlen(r->h1tmp); r->h3tmplen = strlen(&r->h3tmp[CIPHERTEXT_LENGTH]) + CIPHERTEXT_LENGTH; return r; } /* convert response to binary form */ static void *binary(char *ciphertext) { static unsigned int realcipher[BINARY_SIZE / sizeof(int)]; int i; ciphertext += 10; for (i = 0; i < BINARY_SIZE; i++) { ((unsigned char*)realcipher)[i] = atoi16[ARCH_INDEX(ciphertext[i * 2])] * 16 + atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])]; } return (void*) realcipher; } #ifdef MMX_COEF #define HASH_OFFSET (index&(MMX_COEF-1))+(index/MMX_COEF)*MMX_COEF*4 static int get_hash_0(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & 0xf; } static int 
get_hash_1(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & 0xff; } static int get_hash_2(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & 0xfff; } static int get_hash_3(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & 0xffff; } static int get_hash_4(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & 0xfffff; } static int get_hash_5(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & 0xffffff; } static int get_hash_6(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & 0x7ffffff; } #else static int get_hash_0(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & 0xf; } static int get_hash_1(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & 0xff; } static int get_hash_2(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & 0xfff; } static int get_hash_3(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & 0xffff; } static int get_hash_4(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & 0xfffff; } static int get_hash_5(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & 0xffffff; } static int get_hash_6(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & 0x7ffffff; } #endif struct fmt_main fmt_HDAA = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_OMP | FMT_CASE | FMT_8_BIT, #if FMT_MAIN_VERSION > 11 { NULL }, #endif tests }, { init, fmt_default_done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, binary, salt, #if FMT_MAIN_VERSION > 11 { NULL }, #endif fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, 
get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
sigmoid_ref.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: hhchen@openailab.com
 */

#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"

#include <math.h>

#define SIGMOID_MAX(a, b) ((a) > (b) ? (a) : (b))
#define SIGMOID_MIN(a, b) ((a) < (b) ? (a) : (b))

/* Reference float32 sigmoid: out = 1 / (1 + exp(-x)), with x first clamped
 * to [-30, 30] so that expf never overflows.
 * Returns 0 on success. */
int ref_sigmoid_fp32(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    int dim_num = input_tensor->dim_num;

    if (dim_num == 4)
    {
        /* NCHW layout: parallelize across channels. */
        int batch = input_tensor->dims[0];
        int channel = input_tensor->dims[1];
        int cstep = input_tensor->dims[2] * input_tensor->dims[3];
        int bstep = channel * cstep;

        for (int n = 0; n < batch; n++)
        {
#pragma omp parallel for num_threads(num_thread)
            for (int c = 0; c < channel; c++)
            {
                float* input_data = (float*)input_tensor->data + n * bstep + c * cstep;
                float* output_data = (float*)output_tensor->data + n * bstep + c * cstep;

                for (int i = 0; i < cstep; i++)
                {
                    /* BUG FIX: the lower clamp must be applied to the already
                     * upper-clamped value; the original re-read input_data[i],
                     * silently discarding the clamp at +30. */
                    output_data[i] = SIGMOID_MIN(input_data[i], 30.0f);
                    output_data[i] = SIGMOID_MAX(output_data[i], -30.0f);
                    output_data[i] = 1.f / (1 + expf(-output_data[i]));
                }
            }
        }
    }
    else
    {
        /* Any other rank: treat the tensor as a flat array. */
        uint32_t elem_num = input_tensor->elem_num;
        float* input_data = (float*)input_tensor->data;
        float* output_data = (float*)output_tensor->data;
        /* loop counter made unsigned to match elem_num's type */
        for (uint32_t i = 0; i < elem_num; i++)
        {
            output_data[i] = SIGMOID_MIN(input_data[i], 30.0f);
            output_data[i] = SIGMOID_MAX(output_data[i], -30.0f);
            output_data[i] = 1.f / (1 + expf(-output_data[i]));
        }
    }

    return 0;
}

/* Reference uint8 (asymmetric-quantized) sigmoid: dequantize to float,
 * run the same clamped sigmoid, then requantize with saturation to [0,255].
 * Returns 0 on success. */
int ref_sigmoid_uint8(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    /* dequant */
    uint8_t* input_uint8 = (uint8_t*)input_tensor->data;
    uint8_t* output_uint8 = (uint8_t*)output_tensor->data;
    float input_scale = input_tensor->scale;
    float output_scale = output_tensor->scale;
    int32_t input_zero = input_tensor->zero_point;
    int32_t output_zero = output_tensor->zero_point;
    int input_size = input_tensor->elem_num;
    int output_size = output_tensor->elem_num;

    /* NOTE(review): sys_malloc results are used unchecked, as in the rest of
     * this backend; confirm the allocator aborts on failure. */
    float* input_fp32 = (float*)sys_malloc(input_size * sizeof(float));
    float* output_fp32 = (float*)sys_malloc(output_size * sizeof(float));

    for (int i = 0; i < input_size; i++)
    {
        input_fp32[i] = ((float)input_uint8[i] - (float)input_zero) * input_scale;
    }

    for (int i = 0; i < input_size; i++)
    {
        /* BUG FIX: lower clamp must act on the upper-clamped value (the
         * original re-read input_fp32[i] and lost the clamp at +30). */
        output_fp32[i] = SIGMOID_MIN(input_fp32[i], 30.0f);
        output_fp32[i] = SIGMOID_MAX(output_fp32[i], -30.0f);
        output_fp32[i] = 1 / (1 + exp(-output_fp32[i]));
    }

    /* quant */
    for (int i = 0; i < output_size; i++)
    {
        int udata = round(output_fp32[i] / output_scale + output_zero);
        if (udata > 255)
            udata = 255;
        else if (udata < 0)
            udata = 0;
        output_uint8[i] = udata;
    }

    sys_free(input_fp32);
    sys_free(output_fp32);

    return 0;
}

/* No per-node state to set up. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* No per-node state to tear down. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Propagate the input shape to the output when they differ.
 * NOTE(review): only dims[1..3] are compared (dims[0]/batch is ignored),
 * matching the original behavior — confirm this is intended. */
static int reshape_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor;
    struct tensor* output_tensor;
    int ret = 0;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    if (input_tensor->dims[1] != output_tensor->dims[1] || input_tensor->dims[2] != output_tensor->dims[2]
        || input_tensor->dims[3] != output_tensor->dims[3])
        ret = set_ir_tensor_shape(output_tensor, input_tensor->dims, input_tensor->dim_num);

    return ret;
}

/* Nothing to pre-compute. */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Dispatch on the input dtype; returns the kernel's status, -1 for
 * unsupported dtypes. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    int ret = -1;
    if (input_tensor->data_type == TENGINE_DT_FP32)
        ret = ref_sigmoid_fp32(input_tensor, output_tensor, exec_graph->num_thread);
    else if (input_tensor->data_type == TENGINE_DT_UINT8)
        ret = ref_sigmoid_uint8(input_tensor, output_tensor, exec_graph->num_thread);

    return ret;
}

/* Reference implementation: always merely a candidate. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    return OPS_SCORE_CANDO;
}

static struct node_ops sigmoid_node_ops = {.prerun = prerun,
                                           .run = run,
                                           .reshape = reshape_node,
                                           .postrun = NULL,
                                           .init_node = init_node,
                                           .release_node = release_node,
                                           .score = score};

int register_sigmoid_ref_op()
{
    return register_builtin_node_ops(OP_SIGMOID, &sigmoid_node_ops);
}

int unregister_sigmoid_ref_op()
{
    return unregister_builtin_node_ops(OP_SIGMOID, &sigmoid_node_ops);
}
arrays.c
/** * module with tools for manipulating arrays * Julien Lesgourgues, 18.04.2010 */ #include "arrays.h" /** * Called by thermodynamics_init(); perturb_sources(). */ int array_derive( double * array, int n_columns, int n_lines, int index_x, /** from 0 to (n_columns-1) */ int index_y, int index_dydx, ErrorMsg errmsg) { int i; double dx1,dx2,dy1,dy2,weight1,weight2; class_test((index_dydx == index_x) || (index_dydx == index_y), errmsg, "output column %d must differ from input columns %d and %d",index_dydx,index_x,index_y); dx2=array[1*n_columns+index_x]-array[0*n_columns+index_x]; dy2=array[1*n_columns+index_y]-array[0*n_columns+index_y]; for (i=1; i<n_lines-1; i++) { dx1 = dx2; dy1 = dy2; dx2 = array[(i+1)*n_columns+index_x]-array[i*n_columns+index_x]; dy2 = array[(i+1)*n_columns+index_y]-array[i*n_columns+index_y]; class_test((dx1 == 0) || (dx2 == 0), errmsg, "stop to avoid division by zero"); weight1 = dx2*dx2; weight2 = dx1*dx1; array[i*n_columns+index_dydx] = (weight1*dy1+weight2*dy2) / (weight1*dx1+weight2*dx2); if (i == 1) array[(i-1)*n_columns+index_dydx] = 2.*dy1/dx1 - array[i*n_columns+index_dydx]; if (i == n_lines-2) array[(i+1)*n_columns+index_dydx] = 2.*dy2/dx2 - array[i*n_columns+index_dydx]; } return _SUCCESS_; } int array_derive_spline( double * x_array, int n_lines, double * array, double * array_splined, int n_columns, int index_y, int index_dydx, ErrorMsg errmsg) { int i; double h; class_test(index_dydx == index_y, errmsg, "Output column %d must differ from input columns %d", index_dydx, index_y); class_test(n_lines<2, errmsg, "no possible derivation with less than two lines"); for (i=0; i<n_lines-1; i++) { h = x_array[i+1] - x_array[i]; if (h == 0) { sprintf(errmsg,"%s(L:%d) h=0, stop to avoid division by zero",__func__,__LINE__); return _FAILURE_; } array[i*n_columns+index_dydx] = (array[(i+1)*n_columns+index_y] - array[i*n_columns+index_y])/h - h / 6. * (array_splined[(i+1)*n_columns+index_y] + 2. 
* array_splined[i*n_columns+index_y]); } h = x_array[n_lines-1] - x_array[n_lines-2]; array[(n_lines-1)*n_columns+index_dydx] = (array[(n_lines-1)*n_columns+index_y] - array[(n_lines-2)*n_columns+index_y])/h + h / 6. * (2. * array_splined[(n_lines-1)*n_columns+index_y] + array_splined[(n_lines-2)*n_columns+index_y]); return _SUCCESS_; } int array_derive_spline_table_line_to_line( double * x_array, int n_lines, double * array, int n_columns, int index_y, int index_ddy, int index_dy, ErrorMsg errmsg) { int i; double h; class_test(index_ddy == index_y, errmsg, "Output column %d must differ from input columns %d", index_ddy, index_y); class_test(index_ddy == index_dy, errmsg, "Output column %d must differ from input columns %d", index_ddy, index_dy); class_test(n_lines<2, errmsg, "no possible derivation with less than two lines"); for (i=0; i<n_lines-1; i++) { h = x_array[i+1] - x_array[i]; if (h == 0) { sprintf(errmsg,"%s(L:%d) h=0, stop to avoid division by zero",__func__,__LINE__); return _FAILURE_; } array[i*n_columns+index_dy] = (array[(i+1)*n_columns+index_y] - array[i*n_columns+index_y])/h - h / 6. * (array[(i+1)*n_columns+index_ddy] + 2. * array[i*n_columns+index_ddy]); } h = x_array[n_lines-1] - x_array[n_lines-2]; array[(n_lines-1)*n_columns+index_dy] = (array[(n_lines-1)*n_columns+index_y] - array[(n_lines-2)*n_columns+index_y])/h + h / 6. * (2. 
* array[(n_lines-1)*n_columns+index_ddy] + array[(n_lines-2)*n_columns+index_ddy]); return _SUCCESS_; } int array_derive1_order2_table_line_to_line( double * x_array, int n_lines, double * array, int n_columns, int index_y, int index_dy, ErrorMsg errmsg) { int i=1; double dxp,dxm,dyp,dym; if (n_lines < 2) { sprintf(errmsg,"%s(L:%d) routine called with n_lines=%d, should be at least 2",__func__,__LINE__,n_lines); return _FAILURE_; } dxp = x_array[2] - x_array[1]; dxm = x_array[0] - x_array[1]; dyp = *(array+2*n_columns+index_y) - *(array+1*n_columns+index_y); dym = *(array+0*n_columns+index_y) - *(array+1*n_columns+index_y); if ((dxp*dxm*(dxm-dxp)) == 0.) { sprintf(errmsg,"%s(L:%d) stop to avoid division by zero",__func__,__LINE__); return _FAILURE_; } *(array+1*n_columns+index_dy) = (dyp*dxm*dxm-dym*dxp*dxp)/(dxp*dxm*(dxm-dxp)); *(array+0*n_columns+index_dy) = *(array+1*n_columns+index_dy) - (x_array[1] - x_array[0]) * 2.*(dyp*dxm-dym*dxp)/(dxp*dxm*(dxp-dxm)); for (i=2; i<n_lines-1; i++) { dxp = x_array[i+1] - x_array[i]; dxm = x_array[i-1] - x_array[i]; dyp = *(array+(i+1)*n_columns+index_y) - *(array+i*n_columns+index_y); dym = *(array+(i-1)*n_columns+index_y) - *(array+i*n_columns+index_y); if ((dxp*dxm*(dxm-dxp)) == 0.) 
{ sprintf(errmsg,"%s(L:%d) stop to avoid division by zero",__func__,__LINE__); return _FAILURE_; } *(array+i*n_columns+index_dy) = (dyp*dxm*dxm-dym*dxp*dxp)/(dxp*dxm*(dxm-dxp)); } *(array+(n_lines-1)*n_columns+index_dy) = *(array+(n_lines-2)*n_columns+index_dy) + (x_array[n_lines-1] - x_array[n_lines-2]) * 2.*(dyp*dxm-dym*dxp)/(dxp*dxm*(dxp-dxm)); return _SUCCESS_; } int array_derive2_order2_table_line_to_line( double * x_array, int n_lines, double * array, int n_columns, int index_y, int index_dy, int index_ddy, ErrorMsg errmsg) { int i; double dxp,dxm,dyp,dym; for (i=1; i<n_lines-1; i++) { dxp = x_array[i+1] - x_array[i]; dxm = x_array[i-1] - x_array[i]; dyp = *(array+(i+1)*n_columns+index_y) - *(array+i*n_columns+index_y); dym = *(array+(i-1)*n_columns+index_y) - *(array+i*n_columns+index_y); if ((dxp*dxm*(dxm-dxp)) == 0.) { sprintf(errmsg,"%s(L:%d) stop to avoid division by zero",__func__,__LINE__); return _FAILURE_; } *(array+i*n_columns+index_dy) = (dyp*dxm*dxm-dym*dxp*dxp)/(dxp*dxm*(dxm-dxp)); *(array+i*n_columns+index_ddy) = 2.*(dyp*dxm-dym*dxp)/(dxp*dxm*(dxp-dxm)); } *(array+0*n_columns+index_dy) = *(array+1*n_columns+index_dy) - (x_array[1] - x_array[0]) * *(array+1*n_columns+index_ddy); *(array+0*n_columns+index_ddy) = *(array+1*n_columns+index_ddy); *(array+(n_lines-1)*n_columns+index_dy) = *(array+(n_lines-2)*n_columns+index_dy) + (x_array[n_lines-1] - x_array[n_lines-2]) * *(array+(n_lines-2)*n_columns+index_ddy); *(array+(n_lines-1)*n_columns+index_ddy) = *(array+(n_lines-2)*n_columns+index_ddy); return _SUCCESS_; } int array_integrate_spline_table_line_to_line( double * x_array, int n_lines, double * array, int n_columns, int index_y, int index_ddy, int index_inty, ErrorMsg errmsg) { int i; double h; *(array+0*n_columns+index_inty) = 0.; for (i=0; i < n_lines-1; i++) { h = (x_array[i+1]-x_array[i]); *(array+(i+1)*n_columns+index_inty) = *(array+i*n_columns+index_inty) + (array[i*n_columns+index_y]+array[(i+1)*n_columns+index_y])*h/2.+ 
(array[i*n_columns+index_ddy]+array[(i+1)*n_columns+index_ddy])*h*h*h/24.; } return _SUCCESS_; } /** * Not called. */ int array_derive_two( double * array, int n_columns, int n_lines, int index_x, /** from 0 to (n_columns-1) */ int index_y, int index_dydx, int index_ddydxdx, ErrorMsg errmsg) { int i; double dx1,dx2,dy1,dy2,weight1,weight2; if ((index_dydx == index_x) || (index_dydx == index_y)) { sprintf(errmsg,"%s(L:%d) : Output column %d must differ from input columns %d and %d",__func__,__LINE__,index_dydx,index_x,index_y); return _FAILURE_; } dx2=*(array+1*n_columns+index_x)-*(array+0*n_columns+index_x); dy2=*(array+1*n_columns+index_y)-*(array+0*n_columns+index_y); for (i=1; i<n_lines-1; i++) { dx1 = dx2; dy1 = dy2; dx2 = *(array+(i+1)*n_columns+index_x)-*(array+i*n_columns+index_x); dy2 = *(array+(i+1)*n_columns+index_y)-*(array+i*n_columns+index_y); weight1 = dx2*dx2; weight2 = dx1*dx1; if ((dx1 == 0.) && (dx2 == 0.)) { sprintf(errmsg,"%s(L:%d) stop to avoid division by zero",__func__,__LINE__); return _FAILURE_; } *(array+i*n_columns+index_dydx) = (weight1*dy1+weight2*dy2) / (weight1*dx1+weight2*dx2); *(array+i*n_columns+index_ddydxdx) = (dx2*dy1-dx1*dy2) / (weight1*dx1+weight2*dx2); if (i == 1) { *(array+(i-1)*n_columns+index_dydx) = 2.*dy1/dx1 - *(array+i*n_columns+index_dydx); *(array+(i-1)*n_columns+index_ddydxdx) = *(array+i*n_columns+index_ddydxdx); } if (i == n_lines-2) { *(array+(i+1)*n_columns+index_dydx) = 2.*dy2/dx2 - *(array+i*n_columns+index_dydx); *(array+(i+1)*n_columns+index_dydx) = *(array+i*n_columns+index_ddydxdx); } } return _SUCCESS_; } int array_spline( double * array, int n_columns, int n_lines, int index_x, /** from 0 to (n_columns-1) */ int index_y, int index_ddydx2, short spline_mode, ErrorMsg errmsg) { int i,k; double p,qn,sig,un; double * u; double dy_first; double dy_last; if (n_lines < 3) { sprintf(errmsg,"%s(L:%d) n_lines=%d, while routine needs n_lines >= 3",__func__,__LINE__,n_lines); return _FAILURE_; } u = 
malloc((n_lines-1) * sizeof(double)); if (u == NULL) { sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__); return _FAILURE_; } if (spline_mode == _SPLINE_NATURAL_) { *(array+0*n_columns+index_ddydx2) = u[0] = 0.0; } else { if (spline_mode == _SPLINE_EST_DERIV_) { dy_first = ((*(array+2*n_columns+index_x)-*(array+0*n_columns+index_x))* (*(array+2*n_columns+index_x)-*(array+0*n_columns+index_x))* (*(array+1*n_columns+index_y)-*(array+0*n_columns+index_y))- (*(array+1*n_columns+index_x)-*(array+0*n_columns+index_x))* (*(array+1*n_columns+index_x)-*(array+0*n_columns+index_x))* (*(array+2*n_columns+index_y)-*(array+0*n_columns+index_y)))/ ((*(array+2*n_columns+index_x)-*(array+0*n_columns+index_x))* (*(array+1*n_columns+index_x)-*(array+0*n_columns+index_x))* (*(array+2*n_columns+index_x)-*(array+1*n_columns+index_x))); *(array+0*n_columns+index_ddydx2) = -0.5; u[0] = (3./(*(array+1*n_columns+index_x) - *(array+0*n_columns+index_x)))* ((*(array+1*n_columns+index_y) - *(array+0*n_columns+index_y))/ (*(array+1*n_columns+index_x) - *(array+0*n_columns+index_x)) -dy_first); } else { sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode); return _FAILURE_; } } for (i=1; i < n_lines-1; i++) { sig = (*(array+i*n_columns+index_x) - *(array+(i-1)*n_columns+index_x)) / (*(array+(i+1)*n_columns+index_x) - *(array+(i-1)*n_columns+index_x)); p = sig * *(array+(i-1)*n_columns+index_ddydx2) + 2.0; *(array+i*n_columns+index_ddydx2) = (sig-1.0)/p; u[i] = (*(array+(i+1)*n_columns+index_y) - *(array+i*n_columns+index_y)) / (*(array+(i+1)*n_columns+index_x) - *(array+i*n_columns+index_x)) - (*(array+i*n_columns+index_y) - *(array+(i-1)*n_columns+index_y)) / (*(array+i*n_columns+index_x) - *(array+(i-1)*n_columns+index_x)); u[i]= (6.0 * u[i] / (*(array+(i+1)*n_columns+index_x) - *(array+(i-1)*n_columns+index_x)) - sig * u[i-1]) / p; } if (spline_mode == _SPLINE_NATURAL_) { qn=0.; un=0.; } else { if (spline_mode == _SPLINE_EST_DERIV_) { dy_last = 
((*(array+(n_lines-3)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))* (*(array+(n_lines-3)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))* (*(array+(n_lines-2)*n_columns+index_y)-*(array+(n_lines-1)*n_columns+index_y))- (*(array+(n_lines-2)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))* (*(array+(n_lines-2)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))* (*(array+(n_lines-3)*n_columns+index_y)-*(array+(n_lines-1)*n_columns+index_y)))/ ((*(array+(n_lines-3)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))* (*(array+(n_lines-2)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))* (*(array+(n_lines-3)*n_columns+index_x)-*(array+(n_lines-2)*n_columns+index_x))); qn=0.5; un = (3./(*(array+(n_lines-1)*n_columns+index_x) - *(array+(n_lines-2)*n_columns+index_x)))* (dy_last-(*(array+(n_lines-1)*n_columns+index_y) - *(array+(n_lines-2)*n_columns+index_y))/ (*(array+(n_lines-1)*n_columns+index_x) - *(array+(n_lines-2)*n_columns+index_x))); } else { sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode); return _FAILURE_; } } *(array+(n_lines-1)*n_columns+index_ddydx2) = (un-qn*u[n_lines-2])/(qn* *(array+(n_lines-2)*n_columns+index_ddydx2)+1.0); for (k=n_lines-2; k>=0; k--) *(array+k*n_columns+index_ddydx2) = *(array+k*n_columns+index_ddydx2) * *(array+(k+1)*n_columns+index_ddydx2) + u[k]; free(u); return _SUCCESS_; } int array_spline_table_line_to_line( double * x, /* vector of size x_size */ int n_lines, double * array, int n_columns, int index_y, int index_ddydx2, short spline_mode, ErrorMsg errmsg) { int i,k; double p,qn,sig,un; double * u; double dy_first; double dy_last; u = malloc((n_lines-1) * sizeof(double)); if (u == NULL) { sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__); return _FAILURE_; } if (spline_mode == _SPLINE_NATURAL_) { *(array+0*n_columns+index_ddydx2) = u[0] = 0.0; } else { if (spline_mode == _SPLINE_EST_DERIV_) { dy_first = 
((x[2]-x[0])*(x[2]-x[0])* (*(array+1*n_columns+index_y)-*(array+0*n_columns+index_y))- (x[1]-x[0])*(x[1]-x[0])* (*(array+2*n_columns+index_y)-*(array+0*n_columns+index_y)))/ ((x[2]-x[0])*(x[1]-x[0])*(x[2]-x[1])); *(array+0*n_columns+index_ddydx2) = -0.5; u[0] = (3./(x[1] - x[0]))* ((*(array+1*n_columns+index_y) - *(array+0*n_columns+index_y))/ (x[1] - x[0])-dy_first); } else { sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode); return _FAILURE_; } } for (i=1; i < n_lines-1; i++) { sig = (x[i] - x[i-1]) / (x[i+1] - x[i-1]); p = sig * *(array+(i-1)*n_columns+index_ddydx2) + 2.0; *(array+i*n_columns+index_ddydx2) = (sig-1.0)/p; u[i] = (*(array+(i+1)*n_columns+index_y) - *(array+i*n_columns+index_y)) / (x[i+1] - x[i]) - (*(array+i*n_columns+index_y) - *(array+(i-1)*n_columns+index_y)) / (x[i] - x[i-1]); u[i]= (6.0 * u[i] / (x[i+1] - x[i-1]) - sig * u[i-1]) / p; } if (spline_mode == _SPLINE_NATURAL_) { qn=0.; un=0.; } else { if (spline_mode == _SPLINE_EST_DERIV_) { dy_last = ((x[n_lines-3]-x[n_lines-1])*(x[n_lines-3]-x[n_lines-1])* (*(array+(n_lines-2)*n_columns+index_y)-*(array+(n_lines-1)*n_columns+index_y))- (x[n_lines-2]-x[n_lines-1])*(x[n_lines-2]-x[n_lines-1])* (*(array+(n_lines-3)*n_columns+index_y)-*(array+(n_lines-1)*n_columns+index_y)))/ ((x[n_lines-3]-x[n_lines-1])*(x[n_lines-2]-x[n_lines-1])*(x[n_lines-3]-x[n_lines-2])); qn=0.5; un = (3./(x[n_lines-1] - x[n_lines-2]))* (dy_last-(*(array+(n_lines-1)*n_columns+index_y) - *(array+(n_lines-2)*n_columns+index_y))/ (x[n_lines-1] - x[n_lines-2])); } else { sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode); return _FAILURE_; } } *(array+(n_lines-1)*n_columns+index_ddydx2) = (un-qn*u[n_lines-2])/(qn* *(array+(n_lines-2)*n_columns+index_ddydx2)+1.0); for (k=n_lines-2; k>=0; k--) *(array+k*n_columns+index_ddydx2) = *(array+k*n_columns+index_ddydx2) * *(array+(k+1)*n_columns+index_ddydx2) + u[k]; free(u); return _SUCCESS_; } int 
array_spline_table_lines( double * x, /* vector of size x_size */ int x_size, double * y_array, /* array of size x_size*y_size with elements y_array[index_x*y_size+index_y] */ int y_size, double * ddy_array, /* array of size x_size*y_size */ short spline_mode, ErrorMsg errmsg ) { double * p; double * qn; double * un; double * u; double sig; int index_x; int index_y; double dy_first; double dy_last; u = malloc((x_size-1) * y_size * sizeof(double)); p = malloc(y_size * sizeof(double)); qn = malloc(y_size * sizeof(double)); un = malloc(y_size * sizeof(double)); if (u == NULL) { sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__); return _FAILURE_; } if (p == NULL) { sprintf(errmsg,"%s(L:%d) Cannot allocate p",__func__,__LINE__); return _FAILURE_; } if (qn == NULL) { sprintf(errmsg,"%s(L:%d) Cannot allocate qn",__func__,__LINE__); return _FAILURE_; } if (un == NULL) { sprintf(errmsg,"%s(L:%d) Cannot allocate un",__func__,__LINE__); return _FAILURE_; } if (x_size==2) spline_mode = _SPLINE_NATURAL_; // in the case of only 2 x-values, only the natural spline method is appropriate, for _SPLINE_EST_DERIV_ at least 3 x-values are needed. 
index_x=0; if (spline_mode == _SPLINE_NATURAL_) { for (index_y=0; index_y < y_size; index_y++) { ddy_array[index_x*y_size+index_y] = u[index_x*y_size+index_y] = 0.0; } } else { if (spline_mode == _SPLINE_EST_DERIV_) { for (index_y=0; index_y < y_size; index_y++) { dy_first = ((x[2]-x[0])*(x[2]-x[0])* (y_array[1*y_size+index_y]-y_array[0*y_size+index_y])- (x[1]-x[0])*(x[1]-x[0])* (y_array[2*y_size+index_y]-y_array[0*y_size+index_y]))/ ((x[2]-x[0])*(x[1]-x[0])*(x[2]-x[1])); ddy_array[index_x*y_size+index_y] = -0.5; u[index_x*y_size+index_y] = (3./(x[1] - x[0]))* ((y_array[1*y_size+index_y]-y_array[0*y_size+index_y])/ (x[1] - x[0])-dy_first); } } else { sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode); return _FAILURE_; } } for (index_x=1; index_x < x_size-1; index_x++) { sig = (x[index_x] - x[index_x-1])/(x[index_x+1] - x[index_x-1]); for (index_y=0; index_y < y_size; index_y++) { p[index_y] = sig * ddy_array[(index_x-1)*y_size+index_y] + 2.0; ddy_array[index_x*y_size+index_y] = (sig-1.0)/p[index_y]; u[index_x*y_size+index_y] = (y_array[(index_x+1)*y_size+index_y] - y_array[index_x*y_size+index_y]) / (x[index_x+1] - x[index_x]) - (y_array[index_x*y_size+index_y] - y_array[(index_x-1)*y_size+index_y]) / (x[index_x] - x[index_x-1]); u[index_x*y_size+index_y] = (6.0 * u[index_x*y_size+index_y] / (x[index_x+1] - x[index_x-1]) - sig * u[(index_x-1)*y_size+index_y]) / p[index_y]; } } if (spline_mode == _SPLINE_NATURAL_) { for (index_y=0; index_y < y_size; index_y++) { qn[index_y]=un[index_y]=0.0; } } else { if (spline_mode == _SPLINE_EST_DERIV_) { for (index_y=0; index_y < y_size; index_y++) { dy_last = ((x[x_size-3]-x[x_size-1])*(x[x_size-3]-x[x_size-1])* (y_array[(x_size-2)*y_size+index_y]-y_array[(x_size-1)*y_size+index_y])- (x[x_size-2]-x[x_size-1])*(x[x_size-2]-x[x_size-1])* (y_array[(x_size-3)*y_size+index_y]-y_array[(x_size-1)*y_size+index_y]))/ ((x[x_size-3]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*(x[x_size-3]-x[x_size-2])); 
qn[index_y]=0.5; un[index_y]= (3./(x[x_size-1] - x[x_size-2]))* (dy_last-(y_array[(x_size-1)*y_size+index_y] - y_array[(x_size-2)*y_size+index_y])/ (x[x_size-1] - x[x_size-2])); } } else { sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode); return _FAILURE_; } } index_x=x_size-1; for (index_y=0; index_y < y_size; index_y++) { ddy_array[index_x*y_size+index_y] = (un[index_y] - qn[index_y] * u[(index_x-1)*y_size+index_y]) / (qn[index_y] * ddy_array[(index_x-1)*y_size+index_y] + 1.0); } for (index_x=x_size-2; index_x >= 0; index_x--) { for (index_y=0; index_y < y_size; index_y++) { ddy_array[index_x*y_size+index_y] = ddy_array[index_x*y_size+index_y] * ddy_array[(index_x+1)*y_size+index_y] + u[index_x*y_size+index_y]; } } free(qn); free(un); free(p); free(u); return _SUCCESS_; } int array_logspline_table_lines( double * x, /* vector of size x_size */ int x_size, double * y_array, /* array of size x_size*y_size with elements y_array[index_x*y_size+index_y] */ int y_size, double * ddlny_array, /* array of size x_size*y_size */ short spline_mode, ErrorMsg errmsg ) { double * p; double * qn; double * un; double * u; double sig; int index_x; int index_y; double dy_first; double dy_last; u = malloc((x_size-1) * y_size * sizeof(double)); p = malloc(y_size * sizeof(double)); qn = malloc(y_size * sizeof(double)); un = malloc(y_size * sizeof(double)); if (u == NULL) { sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__); return _FAILURE_; } if (p == NULL) { sprintf(errmsg,"%s(L:%d) Cannot allocate p",__func__,__LINE__); return _FAILURE_; } if (qn == NULL) { sprintf(errmsg,"%s(L:%d) Cannot allocate qn",__func__,__LINE__); return _FAILURE_; } if (un == NULL) { sprintf(errmsg,"%s(L:%d) Cannot allocate un",__func__,__LINE__); return _FAILURE_; } if (x_size==2) spline_mode = _SPLINE_NATURAL_; // in the case of only 2 x-values, only the natural spline method is appropriate, for _SPLINE_EST_DERIV_ at least 3 x-values are needed. 
index_x=0; if (spline_mode == _SPLINE_NATURAL_) { for (index_y=0; index_y < y_size; index_y++) { ddlny_array[index_x*y_size+index_y] = u[index_x*y_size+index_y] = 0.0; } } else { if (spline_mode == _SPLINE_EST_DERIV_) { for (index_y=0; index_y < y_size; index_y++) { dy_first = ((log(x[2])-log(x[0]))*(log(x[2])-log(x[0]))* (log(y_array[1*y_size+index_y])-log(y_array[0*y_size+index_y]))- (log(x[1])-log(x[0]))*(log(x[1])-log(x[0]))* (log(y_array[2*y_size+index_y])-log(y_array[0*y_size+index_y])))/ ((log(x[2])-log(x[0]))*(log(x[1])-log(x[0]))*(log(x[2])-log(x[1]))); ddlny_array[index_x*y_size+index_y] = -0.5; u[index_x*y_size+index_y] = (3./(log(x[1]) - log(x[0])))* ((log(y_array[1*y_size+index_y])-log(y_array[0*y_size+index_y]))/ (log(x[1]) - log(x[0]))-dy_first); } } else { sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode); return _FAILURE_; } } for (index_x=1; index_x < x_size-1; index_x++) { sig = (log(x[index_x]) - log(x[index_x-1]))/(log(x[index_x+1]) - log(x[index_x-1])); for (index_y=0; index_y < y_size; index_y++) { p[index_y] = sig * ddlny_array[(index_x-1)*y_size+index_y] + 2.0; ddlny_array[index_x*y_size+index_y] = (sig-1.0)/p[index_y]; u[index_x*y_size+index_y] = (log(y_array[(index_x+1)*y_size+index_y]) - log(y_array[index_x*y_size+index_y])) / (log(x[index_x+1]) - log(x[index_x])) - (log(y_array[index_x*y_size+index_y]) - log(y_array[(index_x-1)*y_size+index_y])) / (log(x[index_x]) - log(x[index_x-1])); u[index_x*y_size+index_y] = (6.0 * u[index_x*y_size+index_y] / (log(x[index_x+1]) - log(x[index_x-1])) - sig * u[(index_x-1)*y_size+index_y]) / p[index_y]; } } if (spline_mode == _SPLINE_NATURAL_) { for (index_y=0; index_y < y_size; index_y++) { qn[index_y]=un[index_y]=0.0; } } else { if (spline_mode == _SPLINE_EST_DERIV_) { for (index_y=0; index_y < y_size; index_y++) { dy_last = ((log(x[x_size-3])-log(x[x_size-1]))*(log(x[x_size-3])-log(x[x_size-1]))* 
(log(y_array[(x_size-2)*y_size+index_y])-log(y_array[(x_size-1)*y_size+index_y]))-
       (log(x[x_size-2])-log(x[x_size-1]))*(log(x[x_size-2])-log(x[x_size-1]))*
       (log(y_array[(x_size-3)*y_size+index_y])-log(y_array[(x_size-1)*y_size+index_y])))/
      ((log(x[x_size-3])-log(x[x_size-1]))*(log(x[x_size-2])-log(x[x_size-1]))*(log(x[x_size-3])-log(x[x_size-2])));
      /* boundary condition at the last point from the estimated derivative dy_last */
      qn[index_y]=0.5;
      un[index_y]=
        (3./(log(x[x_size-1]) - log(x[x_size-2])))*
        (dy_last-(log(y_array[(x_size-1)*y_size+index_y]) - log(y_array[(x_size-2)*y_size+index_y]))/
         (log(x[x_size-1]) - log(x[x_size-2])));
    }
  }
  else {
    sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
    return _FAILURE_;
  }
}

/* last point, then back-substitution of the tridiagonal sweep (part of the
   log-spline routine whose start lies above this chunk) */
index_x=x_size-1;
for (index_y=0; index_y < y_size; index_y++) {
  ddlny_array[index_x*y_size+index_y] =
    (un[index_y] - qn[index_y] * u[(index_x-1)*y_size+index_y]) /
    (qn[index_y] * ddlny_array[(index_x-1)*y_size+index_y] + 1.0);
}
for (index_x=x_size-2; index_x >= 0; index_x--) {
  for (index_y=0; index_y < y_size; index_y++) {
    ddlny_array[index_x*y_size+index_y] = ddlny_array[index_x*y_size+index_y] * ddlny_array[(index_x+1)*y_size+index_y] + u[index_x*y_size+index_y];
  }
}

free(qn);
free(un);
free(p);
free(u);

return _SUCCESS_;
}

/**
 * Compute second derivatives ddy_array of several y columns with respect to x,
 * for later cubic-spline interpolation (classic tridiagonal algorithm).
 * Boundary conditions: natural spline (ddy=0) or estimated first derivatives,
 * depending on spline_mode.
 */
int array_spline_table_columns(
                               double * x, /* vector of size x_size */
                               int x_size,
                               double * y_array, /* array of size x_size*y_size with elements y_array[index_y*x_size+index_x] */
                               int y_size,
                               double * ddy_array, /* array of size x_size*y_size */
                               short spline_mode,
                               ErrorMsg errmsg
                               ) {

  double * p;
  double * qn;
  double * un;
  double * u;
  double sig;
  int index_x;
  int index_y;
  double dy_first;
  double dy_last;

  u = malloc((x_size-1) * y_size * sizeof(double));
  p = malloc(y_size * sizeof(double));
  qn = malloc(y_size * sizeof(double));
  un = malloc(y_size * sizeof(double));

  /* NOTE(review): if one of the later mallocs fails, the earlier buffers are
     not freed before returning (leak on the error path) */
  if (u == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
    return _FAILURE_;
  }
  if (p == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate p",__func__,__LINE__);
    return _FAILURE_;
  }
  if (qn == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate qn",__func__,__LINE__);
    return _FAILURE_;
  }
  if (un == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate un",__func__,__LINE__);
    return _FAILURE_;
  }

  if (x_size==2) spline_mode = _SPLINE_NATURAL_; // in the case of only 2 x-values, only the natural spline method is appropriate, for _SPLINE_EST_DERIV_ at least 3 x-values are needed.

  /* boundary condition at the first point */
  index_x=0;

  if (spline_mode == _SPLINE_NATURAL_) {
    for (index_y=0; index_y < y_size; index_y++) {
      ddy_array[index_y*x_size+index_x] = 0.0;
      u[index_x*y_size+index_y] = 0.0;
    }
  }
  else {
    if (spline_mode == _SPLINE_EST_DERIV_) {

      class_test(x[2]-x[0]==0., errmsg, "x[2]=%g, x[0]=%g, stop to avoid seg fault",x[2],x[0]);
      class_test(x[1]-x[0]==0., errmsg, "x[1]=%g, x[0]=%g, stop to avoid seg fault",x[1],x[0]);
      class_test(x[2]-x[1]==0., errmsg, "x[2]=%g, x[1]=%g, stop to avoid seg fault",x[2],x[1]);

      for (index_y=0; index_y < y_size; index_y++) {

        /* three-point estimate of dy/dx at the first point */
        dy_first =
          ((x[2]-x[0])*(x[2]-x[0])*
           (y_array[index_y*x_size+1]-y_array[index_y*x_size+0])-
           (x[1]-x[0])*(x[1]-x[0])*
           (y_array[index_y*x_size+2]-y_array[index_y*x_size+0]))/
          ((x[2]-x[0])*(x[1]-x[0])*(x[2]-x[1]));

        ddy_array[index_y*x_size+index_x] = -0.5;

        u[index_x*y_size+index_y] =
          (3./(x[1] - x[0]))*
          ((y_array[index_y*x_size+1]-y_array[index_y*x_size+0])/
           (x[1] - x[0])-dy_first);
      }
    }
    else {
      sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
      return _FAILURE_;
    }
  }

  /* forward sweep of the tridiagonal decomposition */
  for (index_x=1; index_x < x_size-1; index_x++) {

    sig = (x[index_x] - x[index_x-1])/(x[index_x+1] - x[index_x-1]);

    for (index_y=0; index_y < y_size; index_y++) {

      p[index_y] = sig * ddy_array[index_y*x_size+(index_x-1)] + 2.0;

      ddy_array[index_y*x_size+index_x] = (sig-1.0)/p[index_y];

      u[index_x*y_size+index_y] =
        (y_array[index_y*x_size+(index_x+1)] - y_array[index_y*x_size+index_x])
        / (x[index_x+1] - x[index_x])
        - (y_array[index_y*x_size+index_x] - y_array[index_y*x_size+(index_x-1)])
        / (x[index_x] - x[index_x-1]);

      u[index_x*y_size+index_y] =
        (6.0 * u[index_x*y_size+index_y] /
         (x[index_x+1] - x[index_x-1])
         - sig * u[(index_x-1)*y_size+index_y]) / p[index_y];
    }
  }

  /* boundary condition at the last point */
  if (spline_mode == _SPLINE_NATURAL_) {
    for (index_y=0; index_y < y_size; index_y++) {
      qn[index_y]=un[index_y]=0.0;
    }
  }
  else {
    if (spline_mode == _SPLINE_EST_DERIV_) {

      for (index_y=0; index_y < y_size; index_y++) {

        /* three-point estimate of dy/dx at the last point */
        dy_last =
          ((x[x_size-3]-x[x_size-1])*(x[x_size-3]-x[x_size-1])*
           (y_array[index_y*x_size+(x_size-2)]-y_array[index_y*x_size+(x_size-1)])-
           (x[x_size-2]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*
           (y_array[index_y*x_size+(x_size-3)]-y_array[index_y*x_size+(x_size-1)]))/
          ((x[x_size-3]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*(x[x_size-3]-x[x_size-2]));

        qn[index_y]=0.5;

        un[index_y]=
          (3./(x[x_size-1] - x[x_size-2]))*
          (dy_last-(y_array[index_y*x_size+(x_size-1)] - y_array[index_y*x_size+(x_size-2)])/
           (x[x_size-1] - x[x_size-2]));
      }
    }
    else {
      sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
      return _FAILURE_;
    }
  }

  /* last point, then back-substitution */
  index_x=x_size-1;

  for (index_y=0; index_y < y_size; index_y++) {
    ddy_array[index_y*x_size+index_x] =
      (un[index_y] - qn[index_y] * u[(index_x-1)*y_size+index_y]) /
      (qn[index_y] * ddy_array[index_y*x_size+(index_x-1)] + 1.0);
  }

  for (index_x=x_size-2; index_x >= 0; index_x--) {
    for (index_y=0; index_y < y_size; index_y++) {
      ddy_array[index_y*x_size+index_x] = ddy_array[index_y*x_size+index_x] * ddy_array[index_y*x_size+(index_x+1)] + u[index_x*y_size+index_y];
    }
  }

  free(qn);
  free(p);
  free(u);
  free(un);

  return _SUCCESS_;
}

/**
 * Same as array_spline_table_columns() but with the per-column work
 * parallelized with OpenMP (continues on the next lines).
 */
int array_spline_table_columns2(
                                double * x, /* vector of size x_size */
                                int x_size,
                                double * y_array, /* array of size x_size*y_size with elements y_array[index_y*x_size+index_x] */
                                int y_size,
                                double * ddy_array, /* array of size x_size*y_size */
                                short spline_mode,
                                ErrorMsg errmsg
                                ) {

  double * p;
  double * qn;
  double * un;
  double * u;
  double sig;
  int index_x;
  int index_y;
  double dy_first;
  double dy_last;

  u = malloc((x_size-1) * y_size * sizeof(double));
  p = malloc(y_size * sizeof(double));
  qn =
malloc(y_size * sizeof(double));
  un = malloc(y_size * sizeof(double));

  /* NOTE(review): as in array_spline_table_columns(), earlier buffers leak
     if a later malloc fails */
  if (u == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
    return _FAILURE_;
  }
  if (p == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate p",__func__,__LINE__);
    return _FAILURE_;
  }
  if (qn == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate qn",__func__,__LINE__);
    return _FAILURE_;
  }
  if (un == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate un",__func__,__LINE__);
    return _FAILURE_;
  }

  if (x_size==2) spline_mode = _SPLINE_NATURAL_; // in the case of only 2 x-values, only the natural spline method is appropriate, for _SPLINE_EST_DERIV_ 3 x-values are needed.

  /* each thread handles a subset of the y columns; p/qn/un are indexed by
     index_y so the shared scratch arrays do not race */
#pragma omp parallel                                                    \
  shared(x,x_size,y_array,y_size,ddy_array,spline_mode,p,qn,un,u)       \
  private(index_y,index_x,sig,dy_first,dy_last)
  {
#pragma omp for schedule (dynamic)
    for (index_y=0; index_y < y_size; index_y++) {

      /* boundary condition at the first point */
      if (spline_mode == _SPLINE_NATURAL_) {
        ddy_array[index_y*x_size+0] = 0.0;
        u[0*y_size+index_y] = 0.0;
      }
      else {
        /* estimated first derivative at the first point
           (no explicit check of spline_mode here, unlike the serial version) */
        dy_first =
          ((x[2]-x[0])*(x[2]-x[0])*
           (y_array[index_y*x_size+1]-y_array[index_y*x_size+0])-
           (x[1]-x[0])*(x[1]-x[0])*
           (y_array[index_y*x_size+2]-y_array[index_y*x_size+0]))/
          ((x[2]-x[0])*(x[1]-x[0])*(x[2]-x[1]));

        ddy_array[index_y*x_size+0] = -0.5;

        u[0*y_size+index_y] =
          (3./(x[1] - x[0]))*
          ((y_array[index_y*x_size+1]-y_array[index_y*x_size+0])/
           (x[1] - x[0])-dy_first);
      }

      /* forward sweep of the tridiagonal decomposition for this column */
      for (index_x=1; index_x < x_size-1; index_x++) {

        sig = (x[index_x] - x[index_x-1])/(x[index_x+1] - x[index_x-1]);

        p[index_y] = sig * ddy_array[index_y*x_size+(index_x-1)] + 2.0;

        ddy_array[index_y*x_size+index_x] = (sig-1.0)/p[index_y];

        u[index_x*y_size+index_y] =
          (y_array[index_y*x_size+(index_x+1)] - y_array[index_y*x_size+index_x])
          / (x[index_x+1] - x[index_x])
          - (y_array[index_y*x_size+index_x] - y_array[index_y*x_size+(index_x-1)])
          / (x[index_x] - x[index_x-1]);

        u[index_x*y_size+index_y] =
          (6.0 * u[index_x*y_size+index_y] /
           (x[index_x+1] - x[index_x-1])
           - sig * u[(index_x-1)*y_size+index_y]) / p[index_y];
      }

      /* boundary condition at the last point */
      if (spline_mode == _SPLINE_NATURAL_) {
        qn[index_y]=un[index_y]=0.0;
      }
      else {
        /* estimated first derivative at the last point */
        dy_last =
          ((x[x_size-3]-x[x_size-1])*(x[x_size-3]-x[x_size-1])*
           (y_array[index_y*x_size+(x_size-2)]-y_array[index_y*x_size+(x_size-1)])-
           (x[x_size-2]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*
           (y_array[index_y*x_size+(x_size-3)]-y_array[index_y*x_size+(x_size-1)]))/
          ((x[x_size-3]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*(x[x_size-3]-x[x_size-2]));

        qn[index_y]=0.5;

        un[index_y]=
          (3./(x[x_size-1] - x[x_size-2]))*
          (dy_last-(y_array[index_y*x_size+(x_size-1)] - y_array[index_y*x_size+(x_size-2)])/
           (x[x_size-1] - x[x_size-2]));
      }

      /* last point, then back-substitution for this column */
      index_x=x_size-1;

      ddy_array[index_y*x_size+index_x] =
        (un[index_y] - qn[index_y] * u[(index_x-1)*y_size+index_y]) /
        (qn[index_y] * ddy_array[index_y*x_size+(index_x-1)] + 1.0);

      for (index_x=x_size-2; index_x >= 0; index_x--) {
        ddy_array[index_y*x_size+index_x] = ddy_array[index_y*x_size+index_x] * ddy_array[index_y*x_size+(index_x+1)] + u[index_x*y_size+index_y];
      }
    }
  }

  free(qn);
  free(p);
  free(u);
  free(un);

  return _SUCCESS_;
}

/**
 * Same spline-coefficient computation as array_spline_table_columns(), but
 * restricted to one column index_y of y_array; scalar boundary scratch
 * values instead of per-column arrays.
 */
int array_spline_table_one_column(
                                  double * x, /* vector of size x_size */
                                  int x_size,
                                  double * y_array, /* array of size x_size*y_size with elements y_array[index_y*x_size+index_x] */
                                  int y_size,
                                  int index_y,
                                  double * ddy_array, /* array of size x_size*y_size */
                                  short spline_mode,
                                  ErrorMsg errmsg
                                  ) {

  double p;
  double qn;
  double un;
  double * u;
  double sig;
  int index_x;
  double dy_first;
  double dy_last;

  u = malloc((x_size-1) * sizeof(double));
  if (u == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
    return _FAILURE_;
  }

  if (x_size==2) spline_mode = _SPLINE_NATURAL_; // in the case of only 2 x-values, only the natural spline method is appropriate, for _SPLINE_EST_DERIV_ at least 3 x-values are needed.
/************************************************/

  /* boundary condition at the first point */
  index_x=0;

  if (spline_mode == _SPLINE_NATURAL_) {
    ddy_array[index_y*x_size+index_x] = 0.0;
    u[index_x] = 0.0;
  }
  else {
    if (spline_mode == _SPLINE_EST_DERIV_) {

      /* three-point estimate of dy/dx at the first point */
      dy_first =
        ((x[2]-x[0])*(x[2]-x[0])*
         (y_array[index_y*x_size+1]-y_array[index_y*x_size+0])-
         (x[1]-x[0])*(x[1]-x[0])*
         (y_array[index_y*x_size+2]-y_array[index_y*x_size+0]))/
        ((x[2]-x[0])*(x[1]-x[0])*(x[2]-x[1]));

      ddy_array[index_y*x_size+index_x] = -0.5;

      u[index_x] =
        (3./(x[1] - x[0]))*
        ((y_array[index_y*x_size+1]-y_array[index_y*x_size+0])/
         (x[1] - x[0])-dy_first);
    }
    else {
      sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
      return _FAILURE_;
    }
  }

  /************************************************/

  /* forward sweep of the tridiagonal decomposition */
  for (index_x=1; index_x < x_size-1; index_x++) {

    sig = (x[index_x] - x[index_x-1])/(x[index_x+1] - x[index_x-1]);

    p = sig * ddy_array[index_y*x_size+(index_x-1)] + 2.0;

    ddy_array[index_y*x_size+index_x] = (sig-1.0)/p;

    u[index_x] =
      (y_array[index_y*x_size+(index_x+1)] - y_array[index_y*x_size+index_x])
      / (x[index_x+1] - x[index_x])
      - (y_array[index_y*x_size+index_x] - y_array[index_y*x_size+(index_x-1)])
      / (x[index_x] - x[index_x-1]);

    u[index_x] =
      (6.0 * u[index_x] /
       (x[index_x+1] - x[index_x-1])
       - sig * u[index_x-1]) / p;
  }

  /************************************************/

  /* boundary condition at the last point */
  if (spline_mode == _SPLINE_NATURAL_) {
    qn=un=0.0;
  }
  else {
    if (spline_mode == _SPLINE_EST_DERIV_) {

      /* three-point estimate of dy/dx at the last point */
      dy_last =
        ((x[x_size-3]-x[x_size-1])*(x[x_size-3]-x[x_size-1])*
         (y_array[index_y*x_size+(x_size-2)]-y_array[index_y*x_size+(x_size-1)])-
         (x[x_size-2]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*
         (y_array[index_y*x_size+(x_size-3)]-y_array[index_y*x_size+(x_size-1)]))/
        ((x[x_size-3]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*(x[x_size-3]-x[x_size-2]));

      qn=0.5;

      un=
        (3./(x[x_size-1] - x[x_size-2]))*
        (dy_last-(y_array[index_y*x_size+(x_size-1)] - y_array[index_y*x_size+(x_size-2)])/
         (x[x_size-1] - x[x_size-2]));
    }
    else {
      sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
      return _FAILURE_;
    }
  }

  /************************************************/

  /* last point, then back-substitution */
  index_x=x_size-1;

  ddy_array[index_y*x_size+index_x] =
    (un - qn * u[index_x-1]) /
    (qn * ddy_array[index_y*x_size+(index_x-1)] + 1.0);

  for (index_x=x_size-2; index_x >= 0; index_x--) {
    ddy_array[index_y*x_size+index_x] = ddy_array[index_y*x_size+index_x] * ddy_array[index_y*x_size+(index_x+1)] + u[index_x];
  }

  free(u);

  return _SUCCESS_;
}

/**
 * Same as array_spline_table_one_column(), but the spline is built for
 * ln(y) as a function of ln(x), using only the first x_stop points
 * (assumes x and y values are strictly positive, since log() is applied
 * to them directly).
 */
int array_logspline_table_one_column(
                                     double * x, /* vector of size x_size */
                                     int x_size,
                                     int x_stop,
                                     double * y_array, /* array of size x_size*y_size with elements y_array[index_y*x_size+index_x] */
                                     int y_size,
                                     int index_y,
                                     double * ddlogy_array, /* array of size x_size*y_size */
                                     short spline_mode,
                                     ErrorMsg errmsg
                                     ) {

  double p;
  double qn;
  double un;
  double * u;
  double sig;
  int index_x;
  double dy_first;
  double dy_last;

  u = malloc((x_stop-1) * sizeof(double));
  if (u == NULL) {
    sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
    return _FAILURE_;
  }

  if (x_size==2) spline_mode = _SPLINE_NATURAL_; // in the case of only 2 x-values, only the natural spline method is appropriate, for _SPLINE_EST_DERIV_ at least 3 x-values are needed.
/************************************************/

  /* boundary condition at the first point (in log-log variables) */
  index_x=0;

  if (spline_mode == _SPLINE_NATURAL_) {
    ddlogy_array[index_y*x_size+index_x] = 0.0;
    u[index_x] = 0.0;
  }
  else {
    if (spline_mode == _SPLINE_EST_DERIV_) {

      /* three-point estimate of dln(y)/dln(x) at the first point */
      dy_first =
        ((log(x[2])-log(x[0]))*(log(x[2])-log(x[0]))*
         (log(y_array[index_y*x_size+1])-log(y_array[index_y*x_size+0]))-
         (log(x[1])-log(x[0]))*(log(x[1])-log(x[0]))*
         (log(y_array[index_y*x_size+2])-log(y_array[index_y*x_size+0])))/
        ((log(x[2])-log(x[0]))*(log(x[1])-log(x[0]))*(log(x[2])-log(x[1])));

      ddlogy_array[index_y*x_size+index_x] = -0.5;

      u[index_x] =
        (3./(log(x[1]) - log(x[0])))*
        ((log(y_array[index_y*x_size+1])-log(y_array[index_y*x_size+0]))/
         (log(x[1]) - log(x[0]))-dy_first);
    }
    else {
      sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
      return _FAILURE_;
    }
  }

  /************************************************/

  /* forward sweep of the tridiagonal decomposition, up to x_stop */
  for (index_x=1; index_x < x_stop-1; index_x++) {

    sig = (log(x[index_x]) - log(x[index_x-1]))/(log(x[index_x+1]) - log(x[index_x-1]));

    p = sig * ddlogy_array[index_y*x_size+(index_x-1)] + 2.0;

    ddlogy_array[index_y*x_size+index_x] = (sig-1.0)/p;

    u[index_x] =
      (log(y_array[index_y*x_size+(index_x+1)]) - log(y_array[index_y*x_size+index_x]))
      / (log(x[index_x+1]) - log(x[index_x]))
      - (log(y_array[index_y*x_size+index_x]) - log(y_array[index_y*x_size+(index_x-1)]))
      / (log(x[index_x]) - log(x[index_x-1]));

    u[index_x] =
      (6.0 * u[index_x] /
       (log(x[index_x+1]) - log(x[index_x-1]))
       - sig * u[index_x-1]) / p;
  }

  /************************************************/

  /* boundary condition at the last used point (index x_stop-1) */
  if (spline_mode == _SPLINE_NATURAL_) {
    qn=un=0.0;
  }
  else {
    if (spline_mode == _SPLINE_EST_DERIV_) {

      /* three-point estimate of dln(y)/dln(x) at the last used point */
      dy_last =
        ((log(x[x_stop-3])-log(x[x_stop-1]))*(log(x[x_stop-3])-log(x[x_stop-1]))*
         (log(y_array[index_y*x_size+(x_stop-2)])-log(y_array[index_y*x_size+(x_stop-1)]))-
         (log(x[x_stop-2])-log(x[x_stop-1]))*(log(x[x_stop-2])-log(x[x_stop-1]))*
         (log(y_array[index_y*x_size+(x_stop-3)])-log(y_array[index_y*x_size+(x_stop-1)])))/
        ((log(x[x_stop-3])-log(x[x_stop-1]))*(log(x[x_stop-2])-log(x[x_stop-1]))*
         (log(x[x_stop-3])-log(x[x_stop-2])));

      qn=0.5;

      un=
        (3./(log(x[x_stop-1]) - log(x[x_stop-2])))*
        (dy_last-(log(y_array[index_y*x_size+(x_stop-1)]) - log(y_array[index_y*x_size+(x_stop-2)]))/
         (log(x[x_stop-1]) - log(x[x_stop-2])));
    }
    else {
      sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
      return _FAILURE_;
    }
  }

  /************************************************/

  /* last used point, then back-substitution */
  index_x=x_stop-1;

  ddlogy_array[index_y*x_size+index_x] =
    (un - qn * u[index_x-1]) /
    (qn * ddlogy_array[index_y*x_size+(index_x-1)] + 1.0);

  for (index_x=x_stop-2; index_x >= 0; index_x--) {
    ddlogy_array[index_y*x_size+index_x] = ddlogy_array[index_y*x_size+index_x] * ddlogy_array[index_y*x_size+(index_x+1)] + u[index_x];
  }

  free(u);

  return _SUCCESS_;
}

/**
 * Integrate column index_y over column index_x along the whole table,
 * using the spline correction stored in column index_ddy (trapezoid plus
 * the h^3/24 cubic-spline term).
 */
int array_integrate_all_spline(
                               double * array,
                               int n_columns,
                               int n_lines,
                               int index_x,   /** from 0 to (n_columns-1) */
                               int index_y,
                               int index_ddy,
                               double * result,
                               ErrorMsg errmsg) {

  int i;
  double h;

  *result = 0;

  for (i=0; i < n_lines-1; i++) {
    h = (array[(i+1)*n_columns+index_x]-array[i*n_columns+index_x]);
    *result +=
      (array[i*n_columns+index_y]+array[(i+1)*n_columns+index_y])*h/2.+
      (array[i*n_columns+index_ddy]+array[(i+1)*n_columns+index_ddy])*h*h*h/24.;
  }

  return _SUCCESS_;
}

/**
 * Integrate column index_y over column index_x: plain trapezoid rule up to
 * line index_start_spline, then spline-corrected trapezoid for the rest.
 */
int array_integrate_all_trapzd_or_spline(
                                         double * array,
                                         int n_columns,
                                         int n_lines,
                                         int index_start_spline,
                                         int index_x,   /** from 0 to (n_columns-1) */
                                         int index_y,
                                         int index_ddy,
                                         double * result,
                                         ErrorMsg errmsg) {

  int i;
  double h;

  if ((index_start_spline<0) || (index_start_spline>=n_lines)) {
    sprintf(errmsg,"%s(L:%d) index_start_spline outside of range",__func__,__LINE__);
    return _FAILURE_;
  }

  *result = 0;

  /* trapezoidal integration till given index */
  for
(i=index_start_spline; i < n_lines-1; i++) { h = (array[(i+1)*n_columns+index_x]-array[i*n_columns+index_x]); *result += (array[i*n_columns+index_y]+array[(i+1)*n_columns+index_y])*h/2.+ (array[i*n_columns+index_ddy]+array[(i+1)*n_columns+index_ddy])*h*h*h/24.; } return _SUCCESS_; } /** * Not called. */ int array_integrate( double * array, int n_columns, int n_lines, int index_x, /** from 0 to (n_columns-1) */ int index_y, int index_int_y_dx, ErrorMsg errmsg) { int i; double sum; if ((index_int_y_dx == index_x) || (index_int_y_dx == index_y)) { sprintf(errmsg,"%s(L:%d) : Output column %d must differ from input columns %d and %d",__func__,__LINE__,index_int_y_dx,index_x,index_y); return _FAILURE_; } sum=0.; *(array+0*n_columns+index_int_y_dx)=sum; for (i=1; i<n_lines; i++) { sum += 0.5 * (*(array+i*n_columns+index_y) + *(array+(i-1)*n_columns+index_y)) * (*(array+i*n_columns+index_x) - *(array+(i-1)*n_columns+index_x)); *(array+i*n_columns+index_int_y_dx)=sum; } return _SUCCESS_; } /** * Called by thermodynamics_init(). 
*/ int array_integrate_ratio( double * array, int n_columns, int n_lines, int index_x, /** from 0 to (n_columns-1) */ int index_y1, int index_y2, int index_int_y1_over_y2_dx, ErrorMsg errmsg) { int i; double sum; if ((index_int_y1_over_y2_dx == index_x) || (index_int_y1_over_y2_dx == index_y1) || (index_int_y1_over_y2_dx == index_y2)) { sprintf(errmsg,"%s(L:%d) : Output column %d must differ from input columns %d, %d and %d",__func__,__LINE__,index_int_y1_over_y2_dx,index_x,index_y1,index_y2); return _FAILURE_; } sum=0.; *(array+0*n_columns+index_int_y1_over_y2_dx)=sum; for (i=1; i<n_lines; i++) { sum += 0.5 * (*(array+i*n_columns+index_y1) / *(array+i*n_columns+index_y2) + *(array+(i-1)*n_columns+index_y1) / *(array+(i-1)*n_columns+index_y2)) * (*(array+i*n_columns+index_x) - *(array+(i-1)*n_columns+index_x)); *(array+i*n_columns+index_int_y1_over_y2_dx)=sum; } return _SUCCESS_; } /** * interpolate to get y_i(x), when x and y_i are all columns of the same array * * Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z(). 
*/ int array_interpolate( double * array, int n_columns, int n_lines, int index_x, /** from 0 to (n_columns-1) */ double x, int * last_index, double * result, int result_size, /** from 1 to n_columns */ ErrorMsg errmsg) { int inf,sup,mid,i; double weight; inf=0; sup=n_lines-1; if (*(array+inf*n_columns+index_x) < *(array+sup*n_columns+index_x)){ if (x < *(array+inf*n_columns+index_x)) { sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,*(array+inf*n_columns+index_x)); return _FAILURE_; } if (x > *(array+sup*n_columns+index_x)) { sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,*(array+sup*n_columns+index_x)); return _FAILURE_; } while (sup-inf > 1) { mid=(int)(0.5*(inf+sup)); if (x < *(array+mid*n_columns+index_x)) {sup=mid;} else {inf=mid;} } } else { if (x < *(array+sup*n_columns+index_x)) { sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,*(array+sup*n_columns+index_x)); return _FAILURE_; } if (x > *(array+inf*n_columns+index_x)) { sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,*(array+inf*n_columns+index_x)); return _FAILURE_; } while (sup-inf > 1) { mid=(int)(0.5*(inf+sup)); if (x > *(array+mid*n_columns+index_x)) {sup=mid;} else {inf=mid;} } } *last_index = inf; weight=(x-*(array+inf*n_columns+index_x))/(*(array+sup*n_columns+index_x)-*(array+inf*n_columns+index_x)); for (i=0; i<result_size; i++) *(result+i) = *(array+inf*n_columns+i) * (1.-weight) + weight * *(array+sup*n_columns+i); *(result+index_x) = x; return _SUCCESS_; } /** * interpolate to get y_i(x), when x and y_i are in different arrays * * Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z(). 
*/ int array_interpolate_spline( double * __restrict__ x_array, int n_lines, double * __restrict__ array, double * __restrict__ array_splined, int n_columns, double x, int * __restrict__ last_index, double * __restrict__ result, int result_size, /** from 1 to n_columns */ ErrorMsg errmsg) { int inf,sup,mid,i; double h,a,b; inf=0; sup=n_lines-1; if (x_array[inf] < x_array[sup]){ if (x < x_array[inf]) { sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[inf]); return _FAILURE_; } if (x > x_array[sup]) { sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[sup]); return _FAILURE_; } while (sup-inf > 1) { mid=(int)(0.5*(inf+sup)); if (x < x_array[mid]) {sup=mid;} else {inf=mid;} } } else { if (x < x_array[sup]) { sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[sup]); return _FAILURE_; } if (x > x_array[inf]) { sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[inf]); return _FAILURE_; } while (sup-inf > 1) { mid=(int)(0.5*(inf+sup)); if (x > x_array[mid]) {sup=mid;} else {inf=mid;} } } *last_index = inf; h = x_array[sup] - x_array[inf]; b = (x-x_array[inf])/h; a = 1-b; for (i=0; i<result_size; i++) *(result+i) = a * *(array+inf*n_columns+i) + b * *(array+sup*n_columns+i) + ((a*a*a-a)* *(array_splined+inf*n_columns+i) + (b*b*b-b)* *(array_splined+sup*n_columns+i))*h*h/6.; return _SUCCESS_; } /** * Get the y[i] for which y[i]>c * * Called by nonlinear_HMcode() */ int array_search_bisect( int n_lines, double * __restrict__ array, double c, int * __restrict__ last_index, ErrorMsg errmsg) { int inf,sup,mid; inf=0; sup=n_lines-1; if (array[inf] < array[sup]){ if (c < array[inf]) { sprintf(errmsg,"%s(L:%d) : c=%e < y_min=%e",__func__,__LINE__,c,array[inf]); return _FAILURE_; } if (c > array[sup]) { sprintf(errmsg,"%s(L:%d) : c=%e > y_max=%e",__func__,__LINE__,c,array[sup]); return _FAILURE_; } while (sup-inf > 1) { mid=(int)(0.5*(inf+sup)); if (c < array[mid]) {sup=mid;} else {inf=mid;} } } 
else { if (c < array[sup]) { sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,c,array[sup]); return _FAILURE_; } if (c > array[inf]) { sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,c,array[inf]); return _FAILURE_; } while (sup-inf > 1) { mid=(int)(0.5*(inf+sup)); if (c > array[mid]) {sup=mid;} else {inf=mid;} } } *last_index = inf; return _SUCCESS_; } /** * interpolate to get y_i(x), when x and y_i are in different arrays * * Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z(). */ int array_interpolate_linear( double * x_array, int n_lines, double * array, int n_columns, double x, int * last_index, double * result, int result_size, /** from 1 to n_columns */ ErrorMsg errmsg) { int inf,sup,mid,i; double h,a,b; inf=0; sup=n_lines-1; if (x_array[inf] < x_array[sup]){ if (x < x_array[inf]) { sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[inf]); return _FAILURE_; } if (x > x_array[sup]) { sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[sup]); return _FAILURE_; } while (sup-inf > 1) { mid=(int)(0.5*(inf+sup)); if (x < x_array[mid]) {sup=mid;} else {inf=mid;} } } else { if (x < x_array[sup]) { sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[sup]); return _FAILURE_; } if (x > x_array[inf]) { sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[inf]); return _FAILURE_; } while (sup-inf > 1) { mid=(int)(0.5*(inf+sup)); if (x > x_array[mid]) {sup=mid;} else {inf=mid;} } } *last_index = inf; h = x_array[sup] - x_array[inf]; b = (x-x_array[inf])/h; a = 1-b; for (i=0; i<result_size; i++) *(result+i) = a * *(array+inf*n_columns+i) + b * *(array+sup*n_columns+i); return _SUCCESS_; } /** * interpolate to get y_i(x), when x and y_i are in different arrays * * Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z(). 
*/ int array_interpolate_logspline( double * x_array, int n_lines, double * array, double * array_logsplined, int n_columns, double x, int * last_index, double * result, int result_size, /** from 1 to n_columns */ ErrorMsg errmsg) { int inf,sup,mid,i; double h,a,b; inf=0; sup=n_lines-1; if (x_array[inf] < x_array[sup]){ if (x < x_array[inf]) { sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[inf]); return _FAILURE_; } if (x > x_array[sup]) { sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[sup]); return _FAILURE_; } while (sup-inf > 1) { mid=(int)(0.5*(inf+sup)); if (x < x_array[mid]) {sup=mid;} else {inf=mid;} } } else { if (x < x_array[sup]) { sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[sup]); return _FAILURE_; } if (x > x_array[inf]) { sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[inf]); return _FAILURE_; } while (sup-inf > 1) { mid=(int)(0.5*(inf+sup)); if (x > x_array[mid]) {sup=mid;} else {inf=mid;} } } *last_index = inf; h = log(x_array[sup]) - log(x_array[inf]); b = (log(x)-log(x_array[inf]))/h; a = 1-b; for (i=0; i<result_size; i++) *(result+i) = exp( a * log(array[inf*n_columns+i]) + b * log(array[sup*n_columns+i]) + ((a*a*a-a)* array_logsplined[inf*n_columns+i] + (b*b*b-b)* array_logsplined[sup*n_columns+i])*h*h/6.); return _SUCCESS_; } /** * interpolate to get y_i(x), when x and y_i are in different arrays * * */ int array_interpolate_spline_one_column( double * x_array, int x_size, double * y_array, /* array of size x_size*y_size with elements y_array[index_y*x_size+index_x] */ int y_size, int index_y, double * ddy_array, /* array of size x_size*y_size */ double x, /* input */ double * y, /* output */ ErrorMsg errmsg ) { int inf,sup,mid; double h,a,b; inf=0; sup=x_size-1; if (x_array[inf] < x_array[sup]){ if (x < x_array[inf]) { sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[inf]); return _FAILURE_; } if (x > x_array[sup]) { 
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[sup]); return _FAILURE_; } while (sup-inf > 1) { mid=(int)(0.5*(inf+sup)); if (x < x_array[mid]) {sup=mid;} else {inf=mid;} } } else { if (x < x_array[sup]) { sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[sup]); return _FAILURE_; } if (x > x_array[inf]) { sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[inf]); return _FAILURE_; } while (sup-inf > 1) { mid=(int)(0.5*(inf+sup)); if (x > x_array[mid]) {sup=mid;} else {inf=mid;} } } h = x_array[sup] - x_array[inf]; b = (x-x_array[inf])/h; a = 1-b; *y = a * y_array[index_y * x_size + inf] + b * y_array[index_y * x_size + sup] + ((a*a*a-a)* ddy_array[index_y * x_size + inf] + (b*b*b-b)* ddy_array[index_y * x_size + sup])*h*h/6.; return _SUCCESS_; } /** * interpolate to get y_i(x), when x and y_i are in different arrays * * */ int array_interpolate_extrapolate_spline_one_column( double * x_array, int x_size, double * y_array, /* array of size x_size*y_size with elements y_array[index_y*x_size+index_x] */ int y_size, int index_y, double * ddy_array, /* array of size x_size*y_size */ double x, /* input */ double * y, /* output */ ErrorMsg errmsg ) { int inf,sup,mid; double h,a,b; if (x > x_array[x_size-2] || x < x_array[0]) { /*interpolate/extrapolate linearly y as a function of x*/ h = x_array[x_size-1] - x_array[x_size-2]; b = (x-x_array[x_size-2])/h; a = 1-b; *y = a * y_array[index_y * x_size + (x_size-2)] + b * y_array[index_y * x_size + (x_size-1)]; } else { /*interpolate y as a function of x with a spline*/ inf=0; sup=x_size-1; if (x_array[inf] < x_array[sup]){ if (x < x_array[inf]) { sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[inf]); return _FAILURE_; } if (x > x_array[sup]) { sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[sup]); return _FAILURE_; } while (sup-inf > 1) { mid=(int)(0.5*(inf+sup)); if (x < x_array[mid]) {sup=mid;} else {inf=mid;} } 
}
    else {

      /* decreasing x_array */
      if (x < x_array[sup]) {
        sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[sup]);
        return _FAILURE_;
      }

      if (x > x_array[inf]) {
        sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[inf]);
        return _FAILURE_;
      }

      while (sup-inf > 1) {
        mid=(int)(0.5*(inf+sup));
        if (x > x_array[mid]) {sup=mid;}
        else {inf=mid;}
      }
    }

    h = x_array[sup] - x_array[inf];
    b = (x-x_array[inf])/h;
    a = 1-b;

    *y =
      a * y_array[index_y * x_size + inf] +
      b * y_array[index_y * x_size + sup] +
      ((a*a*a-a)* ddy_array[index_y * x_size + inf] +
       (b*b*b-b)* ddy_array[index_y * x_size + sup])*h*h/6.;
  }

  return _SUCCESS_;
}

/**
 * interpolate to get y_i(x), when x and y_i are in different arrays
 *
 *
 */

/**
 * Log-log spline interpolation of one column, with log-linear extrapolation
 * (constant slope of ln(y) vs ln(x), including the spline's end-point slope
 * correction) beyond x_array[x_stop-1]. Only the first x_stop points of the
 * table are used.
 */
int array_interpolate_extrapolate_logspline_loglinear_one_column(
                                                                 double * x_array,
                                                                 int x_size,
                                                                 int x_stop,
                                                                 double * y_array, /* array of size x_size*y_size with elements y_array[index_y*x_size+index_x] */
                                                                 int y_size,
                                                                 int index_y,
                                                                 double * ddlogy_array, /* array of size x_size*y_size */
                                                                 double x,   /* input */
                                                                 double * y, /* output */
                                                                 ErrorMsg errmsg
                                                                 ) {

  int inf,sup,mid;
  double h,a,b;

  if (x > x_array[x_stop-1]) {

    /*interpolate/extrapolate linearly ln(y) as a function of ln(x)*/
    h = log(x_array[x_stop-1]) - log(x_array[x_stop-2]);
    b = (log(x)-log(x_array[x_stop-2]))/h;
    a = 1-b;

    /*     *y = exp(a * log(y_array[index_y * x_size + (x_stop-2)]) + */
    /*              b * log(y_array[index_y * x_size + (x_stop-1)])); */

    /* extrapolate with the spline's first derivative at the last point */
    *y = exp(log(y_array[index_y * x_size + (x_stop-1)])
             +(log(x)-log(x_array[x_stop-1]))
             *((log(y_array[index_y * x_size + (x_stop-1)])-log(y_array[index_y * x_size + (x_stop-2)]))/h
               +h/6.*(ddlogy_array[index_y * x_size + (x_stop-2)]+2.*ddlogy_array[index_y * x_size + (x_stop-1)])));
  }
  else {

    /*interpolate ln(y) as a function of ln(x) with a spline*/
    inf=0;
    sup=x_stop-1;

    if (x_array[inf] < x_array[sup]){

      if (x < x_array[inf]) {
        sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[inf]);
        return _FAILURE_;
      }

      if (x > x_array[sup]) {
        sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[sup]);
        return _FAILURE_;
      }

      while (sup-inf > 1) {
        mid=(int)(0.5*(inf+sup));
        if (x < x_array[mid]) {sup=mid;}
        else {inf=mid;}
      }
    }
    else {

      /* decreasing x_array */
      if (x < x_array[sup]) {
        sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[sup]);
        return _FAILURE_;
      }

      if (x > x_array[inf]) {
        sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[inf]);
        return _FAILURE_;
      }

      while (sup-inf > 1) {
        mid=(int)(0.5*(inf+sup));
        if (x > x_array[mid]) {sup=mid;}
        else {inf=mid;}
      }
    }

    h = log(x_array[sup]) - log(x_array[inf]);
    b = (log(x)-log(x_array[inf]))/h;
    a = 1-b;

    *y = exp(a * log(y_array[index_y * x_size + inf]) +
             b * log(y_array[index_y * x_size + sup]) +
             ((a*a*a-a)* ddlogy_array[index_y * x_size + inf] +
              (b*b*b-b)* ddlogy_array[index_y * x_size + sup])*h*h/6.);
  }

  return _SUCCESS_;
}

/**
 * interpolate to get y_i(x), when x and y_i are all columns of the same array, x is arranged in growing order, and the point x is presumably close to the previous point x from the last call of this function.
 *
 * Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
*/ int array_interpolate_growing_closeby( double * array, int n_columns, int n_lines, int index_x, /** from 0 to (n_columns-1) */ double x, int * last_index, double * result, int result_size, /** from 1 to n_columns */ ErrorMsg errmsg) { int inf,sup,i; double weight; inf = *last_index; sup = *last_index+1; while (x < *(array+inf*n_columns+index_x)) { inf--; if (inf < 0) { sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__, x,array[index_x]); return _FAILURE_; } } sup = inf+1; while (x > *(array+sup*n_columns+index_x)) { sup++; if (sup > (n_lines-1)) { sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__, x,array[(n_lines-1)*n_columns+index_x]); return _FAILURE_; } } inf = sup-1; *last_index = inf; weight=(x-*(array+inf*n_columns+index_x))/(*(array+sup*n_columns+index_x)-*(array+inf*n_columns+index_x)); for (i=0; i<result_size; i++) *(result+i) = *(array+inf*n_columns+i) * (1.-weight) + weight * *(array+sup*n_columns+i); *(result+index_x) = x; return _SUCCESS_; } /** * interpolate to get y(x), when x and y are two columns of the same array, x is arranged in growing order, and the point x is presumably close to the previous point x from the last call of this function. * * Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z(). 
*/ int array_interpolate_one_growing_closeby( double * array, int n_columns, int n_lines, int index_x, /** from 0 to (n_columns-1) */ double x, int * last_index, int index_y, double * result, ErrorMsg errmsg) { int inf,sup; double weight; inf = *last_index; sup = *last_index+1; while (x < *(array+inf*n_columns+index_x)) { inf--; if (inf < 0) { sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__, x,array[index_x]); return _FAILURE_; } } sup = inf+1; while (x > *(array+sup*n_columns+index_x)) { sup++; if (sup > (n_lines-1)) { sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__, x,array[(n_lines-1)*n_columns+index_x]); return _FAILURE_; } } inf = sup-1; *last_index = inf; weight=(x-*(array+inf*n_columns+index_x))/(*(array+sup*n_columns+index_x)-*(array+inf*n_columns+index_x)); *result = *(array+inf*n_columns+index_y) * (1.-weight) + *(array+sup*n_columns+index_y) * weight; return _SUCCESS_; } /** * interpolate to get y_i(x), when x and y_i are all columns of the same array, x is arranged in growing order, and the point x is presumably very close to the previous point x from the last call of this function. * * Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z(). 
*/

/**
 * Cubic-spline interpolation of all columns y_i(x), given the table and its
 * precomputed second derivatives, when x_array is arranged in growing order
 * and the requested x is presumably close to the x of the previous call
 * ("closeby" search from *last_index). Assumes n_lines >= 2 -- TODO confirm.
 *
 * @param x_array       Input: vector of abscissas, growing order
 * @param n_lines       Input: number of lines (rows)
 * @param array         Input: table of values, size n_lines*n_columns
 * @param array_splined Input: second derivatives from a spline pre-pass
 * @param n_columns     Input: number of columns
 * @param x             Input: abscissa at which to interpolate
 * @param last_index    Input/Output: starting guess / found lower bracket
 * @param result        Output: vector of interpolated columns
 * @param result_size   Input: number of columns to fill, from 1 to n_columns
 * @param errmsg        Output: error message
 * @return the error status
 */
int array_interpolate_spline_growing_closeby(
                                             double * x_array,
                                             int n_lines,
                                             double * array,
                                             double * array_splined,
                                             int n_columns,
                                             double x,
                                             int * last_index,
                                             double * result,
                                             int result_size, /** from 1 to n_columns */
                                             ErrorMsg errmsg) {

  int inf,sup,i;
  double h,a,b;

  inf = *last_index;

  class_test(inf<0 || inf>(n_lines-1),
             errmsg,
             "*lastindex=%d out of range [0:%d]\n",inf,n_lines-1);

  /* walk downward while x lies below the current lower bracket */
  while (x < x_array[inf]) {
    inf--;
    if (inf < 0) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,
              x,x_array[0]);
      return _FAILURE_;
    }
  }

  /* bug fix: if the starting guess was the last row, sup = inf+1 would
     index one past x_array and the loop below would read out of bounds;
     clamp to the last valid interval instead */
  if (inf > n_lines-2) {
    inf = n_lines-2;
  }

  sup = inf+1;

  /* walk upward while x lies above the current upper bracket */
  while (x > x_array[sup]) {
    sup++;
    if (sup > (n_lines-1)) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,
              x,x_array[n_lines-1]);
      return _FAILURE_;
    }
  }

  inf = sup-1;
  *last_index = inf;

  /* standard cubic-spline evaluation in the bracketing interval */
  h = x_array[sup] - x_array[inf];
  b = (x-x_array[inf])/h;
  a = 1-b;

  for (i=0; i<result_size; i++)
    *(result+i) =
      a * *(array+inf*n_columns+i) + b * *(array+sup*n_columns+i)
      + ((a*a*a-a)* *(array_splined+inf*n_columns+i)
         + (b*b*b-b)* *(array_splined+sup*n_columns+i))*h*h/6.;

  return _SUCCESS_;
}

/**
 * interpolate to get y_i(x), when x and y_i are all columns of the same array, x is arranged in growing order, and the point x is presumably close (but maybe not so close) to the previous point x from the last call of this function.
 *
 * Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
*/

int array_interpolate_spline_growing_hunt(
                                          double * x_array,
                                          int n_lines,
                                          double * array,
                                          double * array_splined,
                                          int n_columns,
                                          double x,
                                          int * last_index,
                                          double * result,
                                          int result_size, /** from 1 to n_columns */
                                          ErrorMsg errmsg) {

  int inf,sup,mid,i,inc;
  double h,a,b;

  /* "hunt" strategy (a la Numerical Recipes): start from the previous
     bracket, expand the search window with accelerating steps (inc grows
     by 1 each time), then bisect inside the window found */
  inc=1;
  if (x >= x_array[*last_index]) {
    if (x > x_array[n_lines-1]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__, x,x_array[n_lines-1]);
      return _FAILURE_;
    }
    /* try closest neighboor upward */
    inf = *last_index;
    /* NOTE(review): if *last_index == n_lines-1, sup = n_lines here and the
       read of x_array[sup] below is out of bounds -- presumably callers
       never pass the last row as the guess; verify */
    sup = inf + inc;
    if (x > x_array[sup]) {
      /* hunt upward */
      while (x > x_array[sup]) {
        inf = sup;
        inc += 1;
        sup += inc;
        if (sup > n_lines-1) {
          sup = n_lines-1;
        }
      }
      /* bisect */
      while (sup-inf > 1) {
        mid=(int)(0.5*(inf+sup));
        if (x < x_array[mid]) {sup=mid;}
        else {inf=mid;}
      }
    }
  }
  else {
    if (x < x_array[0]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__, x,x_array[0]);
      return _FAILURE_;
    }
    /* try closest neighboor downward (inf = sup-1 >= 0 here, since
       x < x_array[*last_index] together with x >= x_array[0] implies
       *last_index >= 1) */
    sup = *last_index;
    inf = sup - inc;
    if (x < x_array[inf]) {
      /* hunt downward */
      while (x < x_array[inf]) {
        sup = inf;
        inc += 1;
        inf -= inc;
        if (inf < 0) {
          inf = 0;
        }
      }
      /* bisect */
      while (sup-inf > 1) {
        mid=(int)(0.5*(inf+sup));
        if (x < x_array[mid]) {sup=mid;}
        else {inf=mid;}
      }
    }
  }

  /* remember the lower bracket for the next call */
  *last_index = inf;

  /* standard cubic-spline evaluation in the bracketing interval */
  h = x_array[sup] - x_array[inf];
  b = (x-x_array[inf])/h;
  a = 1-b;

  for (i=0; i<result_size; i++)
    *(result+i) = a * *(array+inf*n_columns+i) + b * *(array+sup*n_columns+i)
      + ((a*a*a-a)* *(array_splined+inf*n_columns+i)
         + (b*b*b-b)* *(array_splined+sup*n_columns+i))*h*h/6.;

  return _SUCCESS_;
}

/**
 * interpolate linearily to get y_i(x), when x and y_i are in two different arrays
 *
 * Called by transfer_interpolate_sources(); transfer_functions_at_k(); perturb_sources_at_eta().
*/

/* note: x lives in array_x (row-major, column index_x); the y columns live
   in array_y stored column-major, i.e. column i occupies the contiguous
   slice array_y[i*n_lines .. i*n_lines+n_lines-1] */
int array_interpolate_two(
                          double * array_x,
                          int n_columns_x,
                          int index_x,   /** from 0 to (n_columns_x-1) */
                          double * array_y,
                          int n_columns_y,
                          int n_lines,  /** must be the same for array_x and array_y */
                          double x,
                          double * result,
                          int result_size, /** from 1 to n_columns_y */
                          ErrorMsg errmsg) {

  int inf,sup,mid,i;
  double weight;

  inf=0;
  sup=n_lines-1;

  /* the x column may be in growing or decreasing order; the two branches
     differ only in the range checks and the bisection comparison */
  if (array_x[inf*n_columns_x+index_x] < array_x[sup*n_columns_x+index_x]){

    if (x < array_x[inf*n_columns_x+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,array_x[inf*n_columns_x+index_x]);
      return _FAILURE_;
    }

    if (x > array_x[sup*n_columns_x+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,array_x[sup*n_columns_x+index_x]);
      return _FAILURE_;
    }

    /* bisection */
    while (sup-inf > 1) {
      mid=(int)(0.5*(inf+sup));
      if (x < array_x[mid*n_columns_x+index_x]) {sup=mid;}
      else {inf=mid;}
    }
  }
  else {

    if (x < *(array_x+sup*n_columns_x+index_x)) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,*(array_x+sup*n_columns_x+index_x));
      return _FAILURE_;
    }

    if (x > *(array_x+inf*n_columns_x+index_x)) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,*(array_x+inf*n_columns_x+index_x));
      return _FAILURE_;
    }

    /* bisection (comparison reversed for decreasing order) */
    while (sup-inf > 1) {
      mid=(int)(0.5*(inf+sup));
      if (x > *(array_x+mid*n_columns_x+index_x)) {sup=mid;}
      else {inf=mid;}
    }
  }

  /* linear weight of the upper bracket */
  weight=(x-*(array_x+inf*n_columns_x+index_x))/(*(array_x+sup*n_columns_x+index_x)-*(array_x+inf*n_columns_x+index_x));

  /* column-major access into array_y */
  for (i=0; i<result_size; i++)
    *(result+i) = *(array_y+i*n_lines+inf) * (1.-weight) + weight * *(array_y+i*n_lines+sup) ;

  return _SUCCESS_;
}

/**
 * Same as array_interpolate_two, but with order of indices exchanged in array_y
 * (array_y is row-major here: row inf occupies array_y[inf*n_columns_y ...])
 */
int array_interpolate_two_bis(
                              double * array_x,
                              int n_columns_x,
                              int index_x,   /** from 0 to (n_columns_x-1) */
                              double * array_y,
                              int n_columns_y,
                              int n_lines,  /** must be the same for array_x and array_y */
                              double x,
                              double * result,
                              int result_size, /** from 1 to n_columns_y */
                              ErrorMsg errmsg) {

  int inf,sup,mid,i;
  double weight;

  inf=0;
  sup=n_lines-1;

  /* same growing/decreasing handling as array_interpolate_two */
  if (array_x[inf*n_columns_x+index_x] < array_x[sup*n_columns_x+index_x]){

    if (x < array_x[inf*n_columns_x+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,array_x[inf*n_columns_x+index_x]);
      return _FAILURE_;
    }

    if (x > array_x[sup*n_columns_x+index_x]) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,array_x[sup*n_columns_x+index_x]);
      return _FAILURE_;
    }

    while (sup-inf > 1) {
      mid=(int)(0.5*(inf+sup));
      if (x < array_x[mid*n_columns_x+index_x]) {sup=mid;}
      else {inf=mid;}
    }
  }
  else {

    if (x < *(array_x+sup*n_columns_x+index_x)) {
      sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,*(array_x+sup*n_columns_x+index_x));
      return _FAILURE_;
    }

    if (x > *(array_x+inf*n_columns_x+index_x)) {
      sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,*(array_x+inf*n_columns_x+index_x));
      return _FAILURE_;
    }

    while (sup-inf > 1) {
      mid=(int)(0.5*(inf+sup));
      if (x > *(array_x+mid*n_columns_x+index_x)) {sup=mid;}
      else {inf=mid;}
    }
  }

  weight=(x-*(array_x+inf*n_columns_x+index_x))/(*(array_x+sup*n_columns_x+index_x)-*(array_x+inf*n_columns_x+index_x));

  /* row-major access into array_y (only difference with array_interpolate_two) */
  for (i=0; i<result_size; i++)
    *(result+i) = *(array_y+inf*n_columns_y+i) * (1.-weight) + weight * *(array_y+sup*n_columns_y+i) ;

  return _SUCCESS_;
}

/**
 * interpolate linearily to get one column y(x), when x is its own vector and
 * y_i are columns of a separate column-major array; tolerates a small
 * (1e-9) overshoot of the range bounds
 */
int array_interpolate_two_arrays_one_column(
                                            double * array_x, /* assumed to be a vector (i.e. one column array) */
                                            double * array_y,
                                            int n_columns_y,
                                            int index_y, /* between 0 and (n_columns_y-1) */
                                            int n_lines,  /** must be the same for array_x and array_y */
                                            double x,
                                            double * result,
                                            ErrorMsg errmsg) {

  int inf,sup,mid;
  double weight;
  /* absolute tolerance on the range checks below */
  double epsilon=1e-9;

  inf=0;
  sup=n_lines-1;

  /* growing or decreasing abscissas, as in array_interpolate_two */
  if (array_x[inf] < array_x[sup]){

    class_test(x < array_x[inf]-epsilon,
               errmsg,
               "x=%e < x_min=%e",x,array_x[inf]);

    class_test(x > array_x[sup]+epsilon,
               errmsg,
               "x=%e > x_max=%e",x,array_x[sup]);

    while (sup-inf > 1) {
      mid=(int)(0.5*(inf+sup));
      if (x < array_x[mid]) {sup=mid;}
      else {inf=mid;}
    }
  }
  else {

    class_test(x < array_x[sup]-epsilon,
               errmsg,
               "x=%e < x_min=%e",x,array_x[sup]);

    class_test(x > array_x[inf]+epsilon,
               errmsg,
               "x=%e > x_max=%e",x,array_x[inf]);

    while (sup-inf > 1) {
      mid=(int)(0.5*(inf+sup));
      if (x > array_x[mid]) {sup=mid;}
      else {inf=mid;}
    }
  }

  weight=(x-array_x[inf])/(array_x[sup]-array_x[inf]);

  /* column-major access: column index_y is contiguous of length n_lines */
  *result = array_y[index_y*n_lines+inf] * (1.-weight)
    + weight * array_y[index_y*n_lines+sup];

  return _SUCCESS_;
}

/**
 * Called by transfer_solve().
*/ int array_interpolate_equal( double * array, int n_columns, int n_lines, double x, double x_min, double x_max, double * result, ErrorMsg errmsg) { int index_minus,i; double x_step,x_minus,weight; if (x < x_min) { sprintf(errmsg,"%s(L:%d) : x out of bounds: x=%e,x_min=%e",__func__,__LINE__,x,x_min); return _FAILURE_; } if (x > x_max) { sprintf(errmsg,"%s(L:%d) : x out of bounds: x=%e,x_max=%e",__func__,__LINE__,x,x_max); return _FAILURE_; } x_step = (x_max-x_min)/(n_lines-1); index_minus = (int)((x-x_min)/x_step); x_minus = index_minus * x_step; weight = (x-x_minus) / x_step; for (i=0; i<n_columns; i++) result[i] = *(array+n_columns*index_minus+i)*(1.-weight) + *(array+n_columns*(index_minus+1)+i)*weight; return _SUCCESS_; } /** * cubic interpolation of array with equally space abscisses */ int array_interpolate_cubic_equal( double x0, double dx, double *yarray, int Nx, double x, double * result, ErrorMsg errmsg) { int i; double frac; class_test((dx > 0 && (x<x0 || x>x0+dx*(Nx-1))), errmsg, "x=%e out of range [%e %e]",x,x0,x0+dx*(Nx-1)); class_test((dx < 0 && (x>x0 || x<x0+dx*(Nx-1))), errmsg, "x=%e out of range [%e %e]",x,x0+dx*(Nx-1),x0); i = (int)floor((x-x0)/dx); if (i<1) i=1; if (i>Nx-3) i=Nx-3; frac = (x-x0)/dx-i; yarray += i-1; *result=-yarray[0]*frac*(1.-frac)*(2.-frac)/6. +yarray[1]*(1.+frac)*(1.-frac)*(2.-frac)/2. +yarray[2]*(1.+frac)*frac*(2.-frac)/2. 
+yarray[3]*(1.+frac)*frac*(frac-1.)/6.; return _SUCCESS_; } int array_interpolate_parabola(double x1, double x2, double x3, double x, double y1, double y2, double y3, double * y, double * dy, double * ddy, ErrorMsg errmsg) { double a,b,c; /* a x_i**2 + b x_i + c = y_i a (x1**2-x2**2) + b (x1-x2) = y1-y2 a (x3**2-x2**2) + b (x3-x2) = y3-y2 a (x1**2-x2**2)(x3**2-x2**2) + b (x1-x2)(x3**2-x2**2) = (y1-y2)(x3**2-x2**2) a (x3**2-x2**2)(x1**2-x2**2) + b (x3-x2)(x1**2-x2**2) = (y3-y2)(x1**2-x2**2) b = [(y1-y2)(x3**2-x2**2) - (y3-y2)(x1**2-x2**2)]/(x1-x2)(x3-x2)(x3-x1) */ b = ((y1-y2)*(x3-x2)*(x3+x2) - (y3-y2)*(x1-x2)*(x1+x2))/(x1-x2)/(x3-x2)/(x3-x1); a = (y1-y2-b*(x1-x2))/(x1-x2)/(x1+x2); c = y2 - b*x2 - a*x2*x2; *y = a*x*x + b*x + c; *dy = 2.*a*x + b; *ddy = 2.*a; return _SUCCESS_; } /** * Called by transfer_solve(). */ int array_integrate_all( double * array, int n_columns, int n_lines, int index_x, /** from 0 to (n_columns-1) */ int index_y, double *result) { int i; double sum; sum=0.; for (i=1; i<n_lines; i++) { sum += 0.5 * (*(array+i*n_columns+index_y) + *(array+(i-1)*n_columns+index_y)) * (*(array+i*n_columns+index_x) - *(array+(i-1)*n_columns+index_x)); } *result = sum; return _SUCCESS_; } int array_smooth_trg(double * array, int k_size, int starting_k, int eta_size, int index_eta, int radius, /*3, 5 or 7 */ ErrorMsg errmsg) { double * smooth; int i,j,jmin,jmax; double weigth; double *coeff; smooth=malloc(k_size*sizeof(double)); if (smooth == NULL) { sprintf(errmsg,"%s(L:%d) Cannot allocate smooth",__func__,__LINE__); return _FAILURE_; } class_calloc(coeff,2*radius+1,sizeof(double),errmsg); switch(radius){ case 3: weigth = 21; coeff[0] = -2; coeff[1] = 3; coeff[2] = 6; coeff[3] = 7; coeff[4] = 6; coeff[5] = 3; coeff[6] = -2; break; case 4: weigth = 231; coeff[0] = -21; coeff[1] = 14; coeff[2] = 39; coeff[3] = 54; coeff[4] = 59; coeff[5] = 54; coeff[6] = 39; coeff[7] = 14; coeff[8] = -21; break; case 5: weigth = 429; coeff[0] = -36; coeff[1] = 9; coeff[2] = 44; 
coeff[3] = 69; coeff[4] = 84; coeff[5] = 89; coeff[6] = 84; coeff[7] = 69; coeff[8] = 44; coeff[9] = 9; coeff[10] = -36; break; case 6: weigth = 143; coeff[0] = -11; coeff[1] = 0; coeff[2] = 9; coeff[3] = 16; coeff[4] = 21; coeff[5] = 24; coeff[6] = 25; coeff[7] = 24; coeff[8] = 21; coeff[9] = 16; coeff[10] = 9; coeff[11] = 0; coeff[12] = -11; break; case 7: weigth = 1105; coeff[0] = -78; coeff[1] = -13; coeff[2] = 42; coeff[3] = 87; coeff[4] = 122; coeff[5] = 147; coeff[6] = 162; coeff[7] = 167; coeff[8] = 162; coeff[9] = 147; coeff[10] = 122; coeff[11] = 87; coeff[12] = 42; coeff[13] = -13; coeff[14] = -78; break; /* case 8: */ default: class_stop(errmsg,"Non valid radius %d: please chose between 3 4 5 or 6\n",radius); weigth=0; break; } for (i=starting_k; i<k_size-radius; i++) { smooth[i]=0.; jmin = MAX(i-radius,0); jmax = MIN(i+radius,k_size-1); for (j=jmin; j <= jmax; j++) { smooth[i] += coeff[j-jmin]*array[j+k_size*index_eta]; } smooth[i] /= weigth; } for (i=starting_k; i<k_size-radius; i++) array[i+k_size*index_eta] = smooth[i]; free(smooth); free(coeff); return _SUCCESS_; } int array_smooth(double * array, int n_columns, int n_lines, int index, /** from 0 to (n_columns-1) */ int radius, ErrorMsg errmsg) { double * smooth; int i,j,jmin,jmax; double weigth; smooth=malloc(n_lines*sizeof(double)); if (smooth == NULL) { sprintf(errmsg,"%s(L:%d) Cannot allocate smooth",__func__,__LINE__); return _FAILURE_; } for (i=0; i<n_lines; i++) { smooth[i]=0.; weigth=0.; jmin = MAX(i-radius,0); jmax = MIN(i+radius,n_lines-1); for (j=jmin; j <= jmax; j++) { smooth[i] += array[j*n_columns+index]; weigth += 1.; } smooth[i] /= weigth; } for (i=0; i<n_lines; i++) array[i*n_columns+index] = smooth[i]; free(smooth); return _SUCCESS_; } /** * Compute quadrature weights for the trapezoidal integration method, xhen x is in gorwing order. * * @param x Input: Grid points on which f() is known. * @param n Input: number of grid points. 
* @param w_trapz Output: Weights of the trapezoidal method. * @return the error status */ int array_trapezoidal_weights( double * __restrict__ x, int n, double * __restrict__ w_trapz, ErrorMsg errmsg ) { int i; /* Case with just one point, w would normally be 0. */ if (n==1){ w_trapz[0] = 0.0; } else if (n>1){ //Set edgeweights: w_trapz[0] = 0.5*(x[1]-x[0]); w_trapz[n-1] = 0.5*(x[n-1]-x[n-2]); //Set inner weights: for (i=1; i<(n-1); i++){ w_trapz[i] = 0.5*(x[i+1]-x[i-1]); } } return _SUCCESS_; } /** * Compute quadrature weights for the trapezoidal integration method, when x is in decreasing order. * * @param x Input: Grid points on which f() is known. * @param n Input: number of grid points. * @param w_trapz Output: Weights of the trapezoidal method. * @return the error status */ int array_trapezoidal_mweights( double * __restrict__ x, int n, double * __restrict__ w_trapz, ErrorMsg errmsg ) { int i; /* Case with just one point. */ if (n==1){ w_trapz[0] = 1.0; } else if (n>1){ //Set edgeweights: w_trapz[0] = 0.5*(x[0]-x[1]); w_trapz[n-1] = 0.5*(x[n-2]-x[n-1]); //Set inner weights: for (i=1; i<(n-1); i++){ w_trapz[i] = 0.5*(x[i-1]-x[i+1]); } } return _SUCCESS_; } /** * Compute integral of function using trapezoidal method. * * @param integrand Input: The function we are integrating. * @param n Input: Compute integral on grid [0;n-1]. * @param w_trapz Input: Weights of the trapezoidal method. * @param I Output: The integral. * @return the error status */ int array_trapezoidal_integral( double * __restrict__ integrand, int n, double * __restrict__ w_trapz, double * __restrict__ I, ErrorMsg errmsg ) { int i; double res=0.0; for (i=0; i<n; i++){ res += integrand[i]*w_trapz[i]; } *I = res; return _SUCCESS_; } /** * Compute convolution integral of product of two functions using trapezoidal method. * * @param integrand1 Input: Function 1. * @param integrand2 Input: Function 2. * @param n Input: Compute integral on grid [0;n-1]. 
* @param w_trapz Input: Weights of the trapezoidal method. * @param I Output: The integral. * @return the error status */ int array_trapezoidal_convolution( double * __restrict__ integrand1, double * __restrict__ integrand2, int n, double * __restrict__ w_trapz, double * __restrict__ I, ErrorMsg errmsg ) { int i; double res=0.0; for (i=0; i<n; i++){ res += integrand1[i]*integrand2[i]*w_trapz[i]; } *I = res; return _SUCCESS_; }
par_multi_interp.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_parcsr_ls.h" /*-------------------------------------------------------------------------- * hypre_ParAMGBuildMultipass * This routine implements Stuben's direct interpolation with multiple passes. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildMultipassHost( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int P_max_elmts, HYPRE_Int weight_option, hypre_ParCSRMatrix **P_ptr ) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MULTIPASS_INTERP] -= hypre_MPI_Wtime(); #endif MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S); hypre_ParCSRCommHandle *comm_handle; hypre_ParCSRCommPkg *tmp_comm_pkg; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = NULL; HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = NULL; //HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int 
*S_offd_j = NULL; /*HYPRE_BigInt *col_map_offd_S = hypre_ParCSRMatrixColMapOffd(S); HYPRE_Int num_cols_offd_S = hypre_CSRMatrixNumCols(S_offd); HYPRE_BigInt *col_map_offd = NULL;*/ HYPRE_Int num_cols_offd; hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; /*at first counter of nonzero cols for each row, finally will be pointer to start of row */ HYPRE_Int *P_diag_j; hypre_CSRMatrix *P_offd; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i; /*at first counter of nonzero cols for each row, finally will be pointer to start of row */ HYPRE_Int *P_offd_j = NULL; HYPRE_Int num_sends = 0; HYPRE_Int *int_buf_data = NULL; HYPRE_BigInt *big_buf_data = NULL; HYPRE_Int *send_map_start; HYPRE_Int *send_map_elmt; HYPRE_Int *send_procs; HYPRE_Int num_recvs = 0; HYPRE_Int *recv_vec_start; HYPRE_Int *recv_procs; HYPRE_Int *new_recv_vec_start = NULL; HYPRE_Int **Pext_send_map_start = NULL; HYPRE_Int **Pext_recv_vec_start = NULL; HYPRE_Int *Pext_start = NULL; HYPRE_Int *P_ncols = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; HYPRE_Int *P_marker; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *C_array; HYPRE_Int *C_array_offd = NULL; HYPRE_Int *pass_array = NULL; /* contains points ordered according to pass */ HYPRE_Int *pass_pointer = NULL; /* pass_pointer[j] contains pointer to first point of pass j contained in pass_array */ HYPRE_Int *P_diag_start; HYPRE_Int *P_offd_start = NULL; HYPRE_Int **P_diag_pass; HYPRE_Int **P_offd_pass = NULL; HYPRE_Int **Pext_pass = NULL; HYPRE_BigInt *big_temp_pass = NULL; HYPRE_BigInt **new_elmts = NULL; /* new neighbors generated in each pass */ HYPRE_Int *new_counter = NULL; /* contains no. 
of new neighbors for each pass */ HYPRE_Int *loc = NULL; /* contains locations for new neighbor connections in int_o_buffer to avoid searching */ HYPRE_Int *Pext_i = NULL; /*contains P_diag_i and P_offd_i info for nonzero cols of off proc neighbors */ HYPRE_BigInt *Pext_send_buffer = NULL; /* used to collect global nonzero col ids in P_diag for send_map_elmts */ HYPRE_Int *map_S_to_new = NULL; HYPRE_BigInt *new_col_map_offd = NULL; HYPRE_BigInt *col_map_offd_P = NULL; HYPRE_Int *permute = NULL; HYPRE_BigInt *big_permute = NULL; HYPRE_Int cnt; HYPRE_Int cnt_nz; HYPRE_Int total_nz; HYPRE_Int pass; HYPRE_Int num_passes; HYPRE_Int max_num_passes = 10; HYPRE_Int n_fine; HYPRE_Int n_coarse = 0; HYPRE_Int n_coarse_offd = 0; HYPRE_Int n_SF = 0; HYPRE_Int n_SF_offd = 0; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_Int *assigned = NULL; HYPRE_Int *assigned_offd = NULL; HYPRE_Real *Pext_send_data = NULL; HYPRE_Real *Pext_data = NULL; HYPRE_Real sum_C, sum_N; HYPRE_Real sum_C_pos, sum_C_neg; HYPRE_Real sum_N_pos, sum_N_neg; HYPRE_Real diagonal; HYPRE_Real alfa = 1.0; HYPRE_Real beta = 1.0; HYPRE_Int j_start; HYPRE_Int j_end; HYPRE_Int i, i1; HYPRE_Int j, j1; HYPRE_Int k, k1, k2, k3; HYPRE_BigInt big_k1; HYPRE_Int pass_array_size; HYPRE_BigInt global_pass_array_size; HYPRE_BigInt local_pass_array_size; HYPRE_Int my_id, num_procs; HYPRE_Int index, start; HYPRE_BigInt my_first_cpt; HYPRE_BigInt total_global_cpts; HYPRE_Int p_cnt; HYPRE_Int total_nz_offd; HYPRE_Int cnt_nz_offd; HYPRE_Int cnt_offd, cnt_new; HYPRE_Int no_break; HYPRE_Int not_found; HYPRE_Int Pext_send_size; HYPRE_Int Pext_recv_size; HYPRE_Int old_Pext_send_size; HYPRE_Int old_Pext_recv_size; HYPRE_Int P_offd_size = 0; HYPRE_Int local_index = -1; HYPRE_Int new_num_cols_offd = 0; HYPRE_Int num_cols_offd_P; /* Threading variables */ HYPRE_Int my_thread_num, num_threads, thread_start, thread_stop; HYPRE_Int pass_length; HYPRE_Int *tmp_marker, *tmp_marker_offd; HYPRE_Int *tmp_array, 
*tmp_array_offd; HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST); HYPRE_Int * cnt_nz_per_thread; HYPRE_Int * cnt_nz_offd_per_thread; /* HYPRE_Real wall_time; wall_time = hypre_MPI_Wtime(); */ /* Initialize threading variables */ max_num_threads[0] = hypre_NumThreads(); cnt_nz_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); cnt_nz_offd_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); for (i = 0; i < max_num_threads[0]; i++) { cnt_nz_offd_per_thread[i] = 0; cnt_nz_per_thread[i] = 0; } /*----------------------------------------------------------------------- * Access the CSR vectors for A and S. Also get size of fine grid. *-----------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); my_first_cpt = num_cpts_global[0]; /* total_global_cpts = 0; */ if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; } hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); if (!comm_pkg) { comm_pkg = hypre_ParCSRMatrixCommPkg(A); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } } //col_map_offd = col_map_offd_A; num_cols_offd = num_cols_offd_A; if (num_cols_offd_A) { A_offd_data = hypre_CSRMatrixData(A_offd); A_offd_j = hypre_CSRMatrixJ(A_offd); } if (num_cols_offd) { S_offd_j = hypre_CSRMatrixJ(S_offd); } n_fine = hypre_CSRMatrixNumRows(A_diag); /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
*-----------------------------------------------------------------------*/ if (n_fine) { fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } n_coarse = 0; n_SF = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) reduction(+:n_coarse,n_SF ) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n_fine; i++) if (CF_marker[i] == 1) { n_coarse++; } else if (CF_marker[i] == -3) { n_SF++; } pass_array_size = n_fine - n_coarse - n_SF; if (pass_array_size) { pass_array = hypre_CTAlloc(HYPRE_Int, pass_array_size, HYPRE_MEMORY_HOST); } pass_pointer = hypre_CTAlloc(HYPRE_Int, max_num_passes + 1, HYPRE_MEMORY_HOST); if (n_fine) { assigned = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST); if (n_coarse) { C_array = hypre_CTAlloc(HYPRE_Int, n_coarse, HYPRE_MEMORY_HOST); } if (num_cols_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); if (num_functions > 1) { dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } } if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); send_map_start = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmt = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); recv_vec_start = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); if (send_map_start[num_sends]) { int_buf_data = hypre_CTAlloc(HYPRE_Int, send_map_start[num_sends], HYPRE_MEMORY_HOST); big_buf_data = hypre_CTAlloc(HYPRE_BigInt, send_map_start[num_sends], HYPRE_MEMORY_HOST); } } index = 0; for (i = 0; i < num_sends; i++) { start = send_map_start[i]; for (j = start; j < send_map_start[i + 1]; j++) { int_buf_data[index++] = CF_marker[send_map_elmt[j]]; } } if (num_procs > 1) { comm_handle = 
hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } if (num_functions > 1) { index = 0; for (i = 0; i < num_sends; i++) { start = send_map_start[i]; for (j = start; j < send_map_start[i + 1]; j++) { int_buf_data[index++] = dof_func[send_map_elmt[j]]; } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } } n_coarse_offd = 0; n_SF_offd = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) reduction(+:n_coarse_offd,n_SF_offd) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_cols_offd; i++) if (CF_marker_offd[i] == 1) { n_coarse_offd++; } else if (CF_marker_offd[i] == -3) { n_SF_offd++; } if (num_cols_offd) { assigned_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); map_S_to_new = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, n_coarse_offd, HYPRE_MEMORY_HOST); } /*----------------------------------------------------------------------- * First Pass: determine the maximal size of P, and elementsPerRow[i]. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Assigned points are points for which we know an interpolation * formula already, and which are thus available to interpolate from. * assigned[i]=0 for C points, and 1, 2, 3, ... for F points, depending * in which pass their interpolation formula is determined. * * pass_array contains the points ordered according to its pass, i.e. * | C-points | points of pass 1 | points of pass 2 | .... * C_points are points 0 through pass_pointer[1]-1, * points of pass k (0 < k < num_passes) are contained in points * pass_pointer[k] through pass_pointer[k+1]-1 of pass_array . 
* * pass_array is also used to avoid going through all points for each pass, * i,e. at the bginning it contains all points in descending order starting * with n_fine-1. Then starting from the last point, we evaluate whether * it is a C_point (pass 0). If it is the point is brought to the front * and the length of the points to be searched is shortened. This is * done until the parameter cnt (which determines the first point of * pass_array to be searched) becomes n_fine. Then all points have been * assigned a pass number. *-----------------------------------------------------------------------*/ cnt = 0; p_cnt = pass_array_size - 1; P_diag_i[0] = 0; P_offd_i[0] = 0; for (i = 0; i < n_fine; i++) { if (CF_marker[i] == 1) { fine_to_coarse[i] = cnt; /* this C point is assigned index coarse_counter on coarse grid, and in column of P */ C_array[cnt++] = i; assigned[i] = 0; P_diag_i[i + 1] = 1; /* one element in row i1 of P */ P_offd_i[i + 1] = 0; } else if (CF_marker[i] == -1) { pass_array[p_cnt--] = i; P_diag_i[i + 1] = 0; P_offd_i[i + 1] = 0; assigned[i] = -1; fine_to_coarse[i] = -1; } else { P_diag_i[i + 1] = 0; P_offd_i[i + 1] = 0; assigned[i] = -1; fine_to_coarse[i] = -1; } } index = 0; for (i = 0; i < num_sends; i++) { start = send_map_start[i]; for (j = start; j < send_map_start[i + 1]; j++) { big_buf_data[index] = (HYPRE_BigInt)fine_to_coarse[send_map_elmt[j]]; if (big_buf_data[index] > -1) { big_buf_data[index] += my_first_cpt; } index++; } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(21, comm_pkg, big_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } new_recv_vec_start = hypre_CTAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST); if (n_coarse_offd) { C_array_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd, HYPRE_MEMORY_HOST); } cnt = 0; new_recv_vec_start[0] = 0; for (j = 0; j < num_recvs; j++) { for (i = recv_vec_start[j]; i < recv_vec_start[j + 1]; i++) { if (CF_marker_offd[i] == 1) { map_S_to_new[i] = cnt; 
C_array_offd[cnt] = i; new_col_map_offd[cnt++] = fine_to_coarse_offd[i]; assigned_offd[i] = 0; } else { assigned_offd[i] = -1; map_S_to_new[i] = -1; } } new_recv_vec_start[j + 1] = cnt; } cnt = 0; hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Mark all local neighbors of C points as 'assigned'. *-----------------------------------------------------------------------*/ pass_pointer[0] = 0; pass_pointer[1] = 0; total_nz = n_coarse; /* accumulates total number of nonzeros in P_diag */ total_nz_offd = 0; /* accumulates total number of nonzeros in P_offd */ cnt = 0; cnt_offd = 0; cnt_nz = 0; cnt_nz_offd = 0; for (i = pass_array_size - 1; i > cnt - 1; i--) { i1 = pass_array[i]; for (j = S_diag_i[i1]; j < S_diag_i[i1 + 1]; j++) { j1 = S_diag_j[j]; if (CF_marker[j1] == 1) { P_diag_i[i1 + 1]++; cnt_nz++; assigned[i1] = 1; } } for (j = S_offd_i[i1]; j < S_offd_i[i1 + 1]; j++) { j1 = S_offd_j[j]; if (CF_marker_offd[j1] == 1) { P_offd_i[i1 + 1]++; cnt_nz_offd++; assigned[i1] = 1; } } if (assigned[i1] == 1) { pass_array[i++] = pass_array[cnt]; pass_array[cnt++] = i1; } } pass_pointer[2] = cnt; /*----------------------------------------------------------------------- * All local neighbors are assigned, now need to exchange the boundary * info for assigned strong neighbors. *-----------------------------------------------------------------------*/ index = 0; for (i = 0; i < num_sends; i++) { start = send_map_start[i]; for (j = start; j < send_map_start[i + 1]; j++) { int_buf_data[index++] = assigned[send_map_elmt[j]]; } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, assigned_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } /*----------------------------------------------------------------------- * Now we need to determine strong neighbors of points of pass 1, etc. 
* we need to update assigned_offd after each pass *-----------------------------------------------------------------------*/ pass = 2; local_pass_array_size = (HYPRE_BigInt)(pass_array_size - cnt); hypre_MPI_Allreduce(&local_pass_array_size, &global_pass_array_size, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); while (global_pass_array_size && pass < max_num_passes) { for (i = pass_array_size - 1; i > cnt - 1; i--) { i1 = pass_array[i]; no_break = 1; for (j = S_diag_i[i1]; j < S_diag_i[i1 + 1]; j++) { j1 = S_diag_j[j]; if (assigned[j1] == pass - 1) { pass_array[i++] = pass_array[cnt]; pass_array[cnt++] = i1; assigned[i1] = pass; no_break = 0; break; } } if (no_break) { for (j = S_offd_i[i1]; j < S_offd_i[i1 + 1]; j++) { j1 = S_offd_j[j]; if (assigned_offd[j1] == pass - 1) { pass_array[i++] = pass_array[cnt]; pass_array[cnt++] = i1; assigned[i1] = pass; break; } } } } /*hypre_printf("pass %d remaining points %d \n", pass, local_pass_array_size);*/ pass++; pass_pointer[pass] = cnt; local_pass_array_size = (HYPRE_BigInt)(pass_array_size - cnt); hypre_MPI_Allreduce(&local_pass_array_size, &global_pass_array_size, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); index = 0; for (i = 0; i < num_sends; i++) { start = send_map_start[i]; for (j = start; j < send_map_start[i + 1]; j++) { int_buf_data[index++] = assigned[send_map_elmt[j]]; } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, assigned_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } } hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST); num_passes = pass; P_diag_pass = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST); /* P_diag_pass[i] will contain all column numbers for points of pass i */ P_diag_pass[1] = hypre_CTAlloc(HYPRE_Int, cnt_nz, HYPRE_MEMORY_HOST); P_diag_start = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); /* P_diag_start[i] contains pointer to begin of column numbers in P_pass for point i, P_diag_i[i+1] 
contains number of columns for point i */ P_offd_start = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_procs > 1) { P_offd_pass = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST); if (cnt_nz_offd) { P_offd_pass[1] = hypre_CTAlloc(HYPRE_Int, cnt_nz_offd, HYPRE_MEMORY_HOST); } else { P_offd_pass[1] = NULL; } new_elmts = hypre_CTAlloc(HYPRE_BigInt*, num_passes, HYPRE_MEMORY_HOST); new_counter = hypre_CTAlloc(HYPRE_Int, num_passes + 1, HYPRE_MEMORY_HOST); new_counter[0] = 0; new_counter[1] = n_coarse_offd; new_num_cols_offd = n_coarse_offd; new_elmts[0] = new_col_map_offd; } /*----------------------------------------------------------------------- * Pass 1: now we consider points of pass 1, with strong C_neighbors, *-----------------------------------------------------------------------*/ cnt_nz = 0; cnt_nz_offd = 0; /* JBS: Possible candidate for threading */ for (i = pass_pointer[1]; i < pass_pointer[2]; i++) { i1 = pass_array[i]; P_diag_start[i1] = cnt_nz; P_offd_start[i1] = cnt_nz_offd; for (j = S_diag_i[i1]; j < S_diag_i[i1 + 1]; j++) { j1 = S_diag_j[j]; if (CF_marker[j1] == 1) { P_diag_pass[1][cnt_nz++] = fine_to_coarse[j1]; } } for (j = S_offd_i[i1]; j < S_offd_i[i1 + 1]; j++) { j1 = S_offd_j[j]; if (CF_marker_offd[j1] == 1) { P_offd_pass[1][cnt_nz_offd++] = map_S_to_new[j1]; } } } total_nz += cnt_nz; total_nz_offd += cnt_nz_offd; if (num_procs > 1) { tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); Pext_send_map_start = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST); Pext_recv_vec_start = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST); Pext_pass = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST); Pext_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd + 1, HYPRE_MEMORY_HOST); if (num_cols_offd) { Pext_start = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } if (send_map_start[num_sends]) { P_ncols = hypre_CTAlloc(HYPRE_Int, send_map_start[num_sends], HYPRE_MEMORY_HOST); } #ifdef 
HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_cols_offd + 1; i++) { Pext_i[i] = 0; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < send_map_start[num_sends]; i++) { P_ncols[i] = 0; } } old_Pext_send_size = 0; old_Pext_recv_size = 0; for (pass = 2; pass < num_passes; pass++) { if (num_procs > 1) { Pext_send_map_start[pass] = hypre_CTAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST); Pext_recv_vec_start[pass] = hypre_CTAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST); Pext_send_size = 0; Pext_send_map_start[pass][0] = 0; for (i = 0; i < num_sends; i++) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j,j1) reduction(+:Pext_send_size) HYPRE_SMP_SCHEDULE #endif for (j = send_map_start[i]; j < send_map_start[i + 1]; j++) { j1 = send_map_elmt[j]; if (assigned[j1] == pass - 1) { P_ncols[j] = P_diag_i[j1 + 1] + P_offd_i[j1 + 1]; Pext_send_size += P_ncols[j]; } } Pext_send_map_start[pass][i + 1] = Pext_send_size; } comm_handle = hypre_ParCSRCommHandleCreate (11, comm_pkg, P_ncols, &Pext_i[1]); hypre_ParCSRCommHandleDestroy(comm_handle); if (Pext_send_size > old_Pext_send_size) { hypre_TFree(Pext_send_buffer, HYPRE_MEMORY_HOST); Pext_send_buffer = hypre_CTAlloc(HYPRE_BigInt, Pext_send_size, HYPRE_MEMORY_HOST); } old_Pext_send_size = Pext_send_size; } cnt_offd = 0; for (i = 0; i < num_sends; i++) { for (j = send_map_start[i]; j < send_map_start[i + 1]; j++) { j1 = send_map_elmt[j]; if (assigned[j1] == pass - 1) { j_start = P_diag_start[j1]; j_end = j_start + P_diag_i[j1 + 1]; for (k = j_start; k < j_end; k++) { Pext_send_buffer[cnt_offd++] = my_first_cpt + (HYPRE_BigInt) P_diag_pass[pass - 1][k]; } j_start = P_offd_start[j1]; j_end = j_start + P_offd_i[j1 + 1]; for (k = j_start; k < j_end; k++) { k1 = P_offd_pass[pass - 1][k]; k3 = 0; while (k3 < pass - 1) { if (k1 < new_counter[k3 + 1]) { k2 = k1 - new_counter[k3]; Pext_send_buffer[cnt_offd++] = 
new_elmts[k3][k2]; break; } k3++; } } } } } if (num_procs > 1) { Pext_recv_size = 0; Pext_recv_vec_start[pass][0] = 0; cnt_offd = 0; for (i = 0; i < num_recvs; i++) { for (j = recv_vec_start[i]; j < recv_vec_start[i + 1]; j++) { if (assigned_offd[j] == pass - 1) { Pext_start[j] = cnt_offd; cnt_offd += Pext_i[j + 1]; } } Pext_recv_size = cnt_offd; Pext_recv_vec_start[pass][i + 1] = Pext_recv_size; } hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm; hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = send_procs; hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = Pext_send_map_start[pass]; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = recv_procs; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = Pext_recv_vec_start[pass]; if (Pext_recv_size) { Pext_pass[pass] = hypre_CTAlloc(HYPRE_Int, Pext_recv_size, HYPRE_MEMORY_HOST); new_elmts[pass - 1] = hypre_CTAlloc(HYPRE_BigInt, Pext_recv_size, HYPRE_MEMORY_HOST); } else { Pext_pass[pass] = NULL; new_elmts[pass - 1] = NULL; } if (Pext_recv_size > old_Pext_recv_size) { hypre_TFree(loc, HYPRE_MEMORY_HOST); loc = hypre_CTAlloc(HYPRE_Int, Pext_recv_size, HYPRE_MEMORY_HOST); hypre_TFree(big_temp_pass, HYPRE_MEMORY_HOST); big_temp_pass = hypre_CTAlloc(HYPRE_BigInt, Pext_recv_size, HYPRE_MEMORY_HOST); } old_Pext_recv_size = Pext_recv_size; comm_handle = hypre_ParCSRCommHandleCreate (21, tmp_comm_pkg, Pext_send_buffer, big_temp_pass); hypre_ParCSRCommHandleDestroy(comm_handle); } cnt_new = 0; cnt_offd = 0; /* JBS: Possible candidate for threading */ for (i = 0; i < num_recvs; i++) { for (j = recv_vec_start[i]; j < recv_vec_start[i + 1]; j++) { if (assigned_offd[j] == pass - 1) { for (j1 = cnt_offd; j1 < cnt_offd + Pext_i[j + 1]; j1++) { big_k1 = big_temp_pass[j1]; k2 = (HYPRE_Int)(big_k1 - my_first_cpt); if (k2 > -1 && k2 < n_coarse) { Pext_pass[pass][j1] = -k2 - 1; } else { not_found = 1; k3 = 0; while (k3 < pass - 1 && not_found) { k2 = 
hypre_BigBinarySearch(new_elmts[k3], big_k1, (new_counter[k3 + 1] - new_counter[k3])); if (k2 > -1) { Pext_pass[pass][j1] = k2 + new_counter[k3]; not_found = 0; } else { k3++; } } if (not_found) { new_elmts[pass - 1][cnt_new] = big_k1; loc[cnt_new++] = j1; } } } cnt_offd += Pext_i[j + 1]; } } } if (cnt_new) { hypre_BigQsortbi(new_elmts[pass - 1], loc, 0, cnt_new - 1); cnt = 0; local_index = new_counter[pass - 1]; Pext_pass[pass][loc[0]] = local_index; for (i = 1; i < cnt_new; i++) { if (new_elmts[pass - 1][i] > new_elmts[pass - 1][cnt]) { new_elmts[pass - 1][++cnt] = new_elmts[pass - 1][i]; local_index++; } Pext_pass[pass][loc[i]] = local_index; } new_counter[pass] = local_index + 1; } else if (num_procs > 1) { new_counter[pass] = new_counter[pass - 1]; } if (new_num_cols_offd < local_index + 1) { new_num_cols_offd = local_index + 1; } pass_length = pass_pointer[pass + 1] - pass_pointer[pass]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,my_thread_num,num_threads,thread_start,thread_stop,cnt_nz,cnt_nz_offd,i1,j,j1,j_start,j_end,k1,k,P_marker,P_marker_offd) #endif { /* Thread by computing the sparsity structure for this pass only over * each thread's range of rows. Rows are divided up evenly amongst * the threads. The necessary thread-wise temporary arrays, like * P_marker, are initialized and de-allocated internally to the * parallel region. */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); thread_start = (pass_length / num_threads) * my_thread_num; if (my_thread_num == num_threads - 1) { thread_stop = pass_length; } else { thread_stop = (pass_length / num_threads) * (my_thread_num + 1); } thread_start += pass_pointer[pass]; thread_stop += pass_pointer[pass]; /* Local initializations */ cnt_nz = 0; cnt_nz_offd = 0; /* This block of code is to go to the top of the parallel region starting before * the loop over num_passes. 
*/ P_marker = hypre_CTAlloc(HYPRE_Int, n_coarse, HYPRE_MEMORY_HOST); /* marks points to see if they're counted */ for (i = 0; i < n_coarse; i++) { P_marker[i] = -1; } if (new_num_cols_offd == local_index + 1) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST); for (i = 0; i < new_num_cols_offd; i++) { P_marker_offd[i] = -1; } } else if (n_coarse_offd) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd, HYPRE_MEMORY_HOST); for (i = 0; i < n_coarse_offd; i++) { P_marker_offd[i] = -1; } } /* Need some variables to store each threads cnt_nz and cnt_nz_offd, and * then stitch things together as in par_interp.c * This loop writes * P_diag_i, P_offd_i: data parallel here, and require no special treatment * P_diag_start, P_offd_start: are not data parallel, require special treatment */ for (i = thread_start; i < thread_stop; i++) { i1 = pass_array[i]; P_diag_start[i1] = cnt_nz; P_offd_start[i1] = cnt_nz_offd; for (j = S_diag_i[i1]; j < S_diag_i[i1 + 1]; j++) { j1 = S_diag_j[j]; if (assigned[j1] == pass - 1) { j_start = P_diag_start[j1]; j_end = j_start + P_diag_i[j1 + 1]; for (k = j_start; k < j_end; k++) { k1 = P_diag_pass[pass - 1][k]; if (P_marker[k1] != i1) { cnt_nz++; P_diag_i[i1 + 1]++; P_marker[k1] = i1; } } j_start = P_offd_start[j1]; j_end = j_start + P_offd_i[j1 + 1]; for (k = j_start; k < j_end; k++) { k1 = P_offd_pass[pass - 1][k]; if (P_marker_offd[k1] != i1) { cnt_nz_offd++; P_offd_i[i1 + 1]++; P_marker_offd[k1] = i1; } } } } j_start = 0; for (j = S_offd_i[i1]; j < S_offd_i[i1 + 1]; j++) { j1 = S_offd_j[j]; if (assigned_offd[j1] == pass - 1) { j_start = Pext_start[j1]; j_end = j_start + Pext_i[j1 + 1]; for (k = j_start; k < j_end; k++) { k1 = Pext_pass[pass][k]; if (k1 < 0) { if (P_marker[-k1 - 1] != i1) { cnt_nz++; P_diag_i[i1 + 1]++; P_marker[-k1 - 1] = i1; } } else if (P_marker_offd[k1] != i1) { cnt_nz_offd++; P_offd_i[i1 + 1]++; P_marker_offd[k1] = i1; } } } } } /* Update P_diag_start, P_offd_start with cumulative * 
nonzero counts over all threads */ if (my_thread_num == 0) { max_num_threads[0] = num_threads; } cnt_nz_offd_per_thread[my_thread_num] = cnt_nz_offd; cnt_nz_per_thread[my_thread_num] = cnt_nz; #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { for (i = 1; i < max_num_threads[0]; i++) { cnt_nz_offd_per_thread[i] += cnt_nz_offd_per_thread[i - 1]; cnt_nz_per_thread[i] += cnt_nz_per_thread[i - 1]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num > 0) { /* update this thread's section of P_diag_start and P_offd_start * with the num of nz's counted by previous threads */ for (i = thread_start; i < thread_stop; i++) { i1 = pass_array[i]; P_diag_start[i1] += cnt_nz_per_thread[my_thread_num - 1]; P_offd_start[i1] += cnt_nz_offd_per_thread[my_thread_num - 1]; } } else /* if my_thread_num == 0 */ { /* Grab the nz count for all threads */ cnt_nz = cnt_nz_per_thread[max_num_threads[0] - 1]; cnt_nz_offd = cnt_nz_offd_per_thread[max_num_threads[0] - 1]; /* Updated total nz count */ total_nz += cnt_nz; total_nz_offd += cnt_nz_offd; /* Allocate P_diag_pass and P_offd_pass for all threads */ P_diag_pass[pass] = hypre_CTAlloc(HYPRE_Int, cnt_nz, HYPRE_MEMORY_HOST); if (cnt_nz_offd) { P_offd_pass[pass] = hypre_CTAlloc(HYPRE_Int, cnt_nz_offd, HYPRE_MEMORY_HOST); } else if (num_procs > 1) { P_offd_pass[pass] = NULL; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* offset cnt_nz and cnt_nz_offd to point to the starting * point in P_diag_pass and P_offd_pass for each thread */ if (my_thread_num > 0) { cnt_nz = cnt_nz_per_thread[my_thread_num - 1]; cnt_nz_offd = cnt_nz_offd_per_thread[my_thread_num - 1]; } else { cnt_nz = 0; cnt_nz_offd = 0; } /* Set P_diag_pass and P_offd_pass */ for (i = thread_start; i < thread_stop; i++) { i1 = pass_array[i]; for (j = S_diag_i[i1]; j < S_diag_i[i1 + 1]; j++) { j1 = S_diag_j[j]; if (assigned[j1] == pass - 1) { j_start = P_diag_start[j1]; j_end = j_start + P_diag_i[j1 + 1]; for (k = j_start; 
k < j_end; k++) { k1 = P_diag_pass[pass - 1][k]; if (P_marker[k1] != -i1 - 1) { P_diag_pass[pass][cnt_nz++] = k1; P_marker[k1] = -i1 - 1; } } j_start = P_offd_start[j1]; j_end = j_start + P_offd_i[j1 + 1]; for (k = j_start; k < j_end; k++) { k1 = P_offd_pass[pass - 1][k]; if (P_marker_offd[k1] != -i1 - 1) { P_offd_pass[pass][cnt_nz_offd++] = k1; P_marker_offd[k1] = -i1 - 1; } } } } for (j = S_offd_i[i1]; j < S_offd_i[i1 + 1]; j++) { j1 = S_offd_j[j]; if (assigned_offd[j1] == pass - 1) { j_start = Pext_start[j1]; j_end = j_start + Pext_i[j1 + 1]; for (k = j_start; k < j_end; k++) { k1 = Pext_pass[pass][k]; if (k1 < 0) { if (P_marker[-k1 - 1] != -i1 - 1) { P_diag_pass[pass][cnt_nz++] = -k1 - 1; P_marker[-k1 - 1] = -i1 - 1; } } else if (P_marker_offd[k1] != -i1 - 1) { P_offd_pass[pass][cnt_nz_offd++] = k1; P_marker_offd[k1] = -i1 - 1; } } } } } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); if ( (n_coarse_offd) || (new_num_cols_offd == local_index + 1) ) { hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } } /* End parallel region */ } hypre_TFree(loc, HYPRE_MEMORY_HOST); hypre_TFree(P_ncols, HYPRE_MEMORY_HOST); hypre_TFree(Pext_send_buffer, HYPRE_MEMORY_HOST); hypre_TFree(big_temp_pass, HYPRE_MEMORY_HOST); hypre_TFree(new_recv_vec_start, HYPRE_MEMORY_HOST); hypre_TFree(cnt_nz_per_thread, HYPRE_MEMORY_HOST); hypre_TFree(cnt_nz_offd_per_thread, HYPRE_MEMORY_HOST); hypre_TFree(max_num_threads, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, total_nz, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, total_nz, HYPRE_MEMORY_HOST); if (total_nz_offd) { P_offd_j = hypre_CTAlloc(HYPRE_Int, total_nz_offd, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, total_nz_offd, HYPRE_MEMORY_HOST); } for (i = 0; i < n_fine; i++) { P_diag_i[i + 1] += P_diag_i[i]; P_offd_i[i + 1] += P_offd_i[i]; } /* determine P for coarse points */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,i1) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n_coarse; i++) { i1 = 
C_array[i]; P_diag_j[P_diag_i[i1]] = fine_to_coarse[i1]; P_diag_data[P_diag_i[i1]] = 1.0; } if (weight_option) /*if this is set, weights are separated into negative and positive offdiagonals and accumulated accordingly */ { pass_length = pass_pointer[2] - pass_pointer[1]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,P_marker,P_marker_offd,i,i1,sum_C_pos,sum_C_neg,sum_N_pos,sum_N_neg,j_start,j_end,j,k1,cnt,j1,cnt_offd,diagonal,alfa,beta) #endif { /* Sparsity structure is now finished. Next, calculate interpolation * weights for pass one. Thread by computing the interpolation * weights only over each thread's range of rows. Rows are divided * up evenly amongst the threads. */ P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } if (num_cols_offd) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols_offd; i++) { P_marker_offd[i] = -1; } } /* Compute this thread's range of pass_length */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); thread_start = pass_pointer[1] + (pass_length / num_threads) * my_thread_num; if (my_thread_num == num_threads - 1) { thread_stop = pass_pointer[1] + pass_length; } else { thread_stop = pass_pointer[1] + (pass_length / num_threads) * (my_thread_num + 1); } /* determine P for points of pass 1, i.e. 
neighbors of coarse points */ for (i = thread_start; i < thread_stop; i++) { i1 = pass_array[i]; sum_C_pos = 0; sum_C_neg = 0; sum_N_pos = 0; sum_N_neg = 0; j_start = P_diag_start[i1]; j_end = j_start + P_diag_i[i1 + 1] - P_diag_i[i1]; for (j = j_start; j < j_end; j++) { k1 = P_diag_pass[1][j]; P_marker[C_array[k1]] = i1; } cnt = P_diag_i[i1]; for (j = A_diag_i[i1] + 1; j < A_diag_i[i1 + 1]; j++) { j1 = A_diag_j[j]; if (CF_marker[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func[j1])) { if (A_diag_data[j] < 0) { sum_N_neg += A_diag_data[j]; } else { sum_N_pos += A_diag_data[j]; } } if (j1 != -1 && P_marker[j1] == i1) { P_diag_data[cnt] = A_diag_data[j]; P_diag_j[cnt++] = fine_to_coarse[j1]; if (A_diag_data[j] < 0) { sum_C_neg += A_diag_data[j]; } else { sum_C_pos += A_diag_data[j]; } } } j_start = P_offd_start[i1]; j_end = j_start + P_offd_i[i1 + 1] - P_offd_i[i1]; for (j = j_start; j < j_end; j++) { k1 = P_offd_pass[1][j]; P_marker_offd[C_array_offd[k1]] = i1; } cnt_offd = P_offd_i[i1]; for (j = A_offd_i[i1]; j < A_offd_i[i1 + 1]; j++) { j1 = A_offd_j[j]; if (CF_marker_offd[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func_offd[j1])) { if (A_offd_data[j] < 0) { sum_N_neg += A_offd_data[j]; } else { sum_N_pos += A_offd_data[j]; } } if (j1 != -1 && P_marker_offd[j1] == i1) { P_offd_data[cnt_offd] = A_offd_data[j]; P_offd_j[cnt_offd++] = map_S_to_new[j1]; if (A_offd_data[j] < 0) { sum_C_neg += A_offd_data[j]; } else { sum_C_pos += A_offd_data[j]; } } } diagonal = A_diag_data[A_diag_i[i1]]; if (sum_C_neg * diagonal != 0) { alfa = -sum_N_neg / (sum_C_neg * diagonal); } if (sum_C_pos * diagonal != 0) { beta = -sum_N_pos / (sum_C_pos * diagonal); } for (j = P_diag_i[i1]; j < cnt; j++) if (P_diag_data[j] < 0) { P_diag_data[j] *= alfa; } else { P_diag_data[j] *= beta; } for (j = P_offd_i[i1]; j < cnt_offd; j++) if (P_offd_data[j] < 0) { P_offd_data[j] *= alfa; } else { P_offd_data[j] *= beta; } } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); if 
(num_cols_offd) { hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } } /* End Parallel Region */ old_Pext_send_size = 0; old_Pext_recv_size = 0; if (n_coarse) { hypre_TFree(C_array, HYPRE_MEMORY_HOST); } hypre_TFree(C_array_offd, HYPRE_MEMORY_HOST); hypre_TFree(P_diag_pass[1], HYPRE_MEMORY_HOST); if (num_procs > 1) { hypre_TFree(P_offd_pass[1], HYPRE_MEMORY_HOST); } for (pass = 2; pass < num_passes; pass++) { if (num_procs > 1) { Pext_send_size = Pext_send_map_start[pass][num_sends]; if (Pext_send_size > old_Pext_send_size) { hypre_TFree(Pext_send_data, HYPRE_MEMORY_HOST); Pext_send_data = hypre_CTAlloc(HYPRE_Real, Pext_send_size, HYPRE_MEMORY_HOST); } old_Pext_send_size = Pext_send_size; cnt_offd = 0; for (i = 0; i < num_sends; i++) { for (j = send_map_start[i]; j < send_map_start[i + 1]; j++) { j1 = send_map_elmt[j]; if (assigned[j1] == pass - 1) { j_start = P_diag_i[j1]; j_end = P_diag_i[j1 + 1]; for (k = j_start; k < j_end; k++) { Pext_send_data[cnt_offd++] = P_diag_data[k]; } j_start = P_offd_i[j1]; j_end = P_offd_i[j1 + 1]; for (k = j_start; k < j_end; k++) { Pext_send_data[cnt_offd++] = P_offd_data[k]; } } } } hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = Pext_send_map_start[pass]; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = Pext_recv_vec_start[pass]; Pext_recv_size = Pext_recv_vec_start[pass][num_recvs]; if (Pext_recv_size > old_Pext_recv_size) { hypre_TFree(Pext_data, HYPRE_MEMORY_HOST); Pext_data = hypre_CTAlloc(HYPRE_Real, Pext_recv_size, HYPRE_MEMORY_HOST); } old_Pext_recv_size = Pext_recv_size; comm_handle = hypre_ParCSRCommHandleCreate (1, tmp_comm_pkg, Pext_send_data, Pext_data); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(Pext_send_map_start[pass], HYPRE_MEMORY_HOST); hypre_TFree(Pext_recv_vec_start[pass], HYPRE_MEMORY_HOST); } pass_length = pass_pointer[pass + 1] - pass_pointer[pass]; #ifdef HYPRE_USING_OPENMP #pragma omp 
parallel private(thread_start,thread_stop,my_thread_num,num_threads,P_marker,P_marker_offd,i,i1,sum_C_neg,sum_C_pos,sum_N_neg,sum_N_pos,j_start,j_end,cnt,j,k1,cnt_offd,j1,k,alfa,beta,diagonal,C_array,C_array_offd) #endif { /* Sparsity structure is now finished. Next, calculate interpolation * weights for passes >= 2. Thread by computing the interpolation * weights only over each thread's range of rows. Rows are divided * up evenly amongst the threads. */ P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } if (num_cols_offd) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols_offd; i++) { P_marker_offd[i] = -1; } } C_array = NULL; C_array_offd = NULL; if (n_coarse) { C_array = hypre_CTAlloc(HYPRE_Int, n_coarse, HYPRE_MEMORY_HOST); } if (new_num_cols_offd > n_coarse_offd) { C_array_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST); } else if (n_coarse_offd) { C_array_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd, HYPRE_MEMORY_HOST); } /* Compute this thread's range of pass_length */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); thread_start = pass_pointer[pass] + (pass_length / num_threads) * my_thread_num; if (my_thread_num == num_threads - 1) { thread_stop = pass_pointer[pass] + pass_length; } else { thread_stop = pass_pointer[pass] + (pass_length / num_threads) * (my_thread_num + 1); } /* Loop over each thread's row-range */ for (i = thread_start; i < thread_stop; i++) { i1 = pass_array[i]; sum_C_neg = 0; sum_C_pos = 0; sum_N_neg = 0; sum_N_pos = 0; j_start = P_diag_start[i1]; j_end = j_start + P_diag_i[i1 + 1] - P_diag_i[i1]; cnt = P_diag_i[i1]; for (j = j_start; j < j_end; j++) { k1 = P_diag_pass[pass][j]; C_array[k1] = cnt; P_diag_data[cnt] = 0; P_diag_j[cnt++] = k1; } j_start = P_offd_start[i1]; j_end = j_start + P_offd_i[i1 + 1] - P_offd_i[i1]; cnt_offd = P_offd_i[i1]; for (j = j_start; j < 
j_end; j++) { k1 = P_offd_pass[pass][j]; C_array_offd[k1] = cnt_offd; P_offd_data[cnt_offd] = 0; P_offd_j[cnt_offd++] = k1; } for (j = S_diag_i[i1]; j < S_diag_i[i1 + 1]; j++) { j1 = S_diag_j[j]; if (assigned[j1] == pass - 1) { P_marker[j1] = i1; } } for (j = S_offd_i[i1]; j < S_offd_i[i1 + 1]; j++) { j1 = S_offd_j[j]; if (assigned_offd[j1] == pass - 1) { P_marker_offd[j1] = i1; } } for (j = A_diag_i[i1] + 1; j < A_diag_i[i1 + 1]; j++) { j1 = A_diag_j[j]; if (P_marker[j1] == i1) { for (k = P_diag_i[j1]; k < P_diag_i[j1 + 1]; k++) { k1 = P_diag_j[k]; alfa = A_diag_data[j] * P_diag_data[k]; P_diag_data[C_array[k1]] += alfa; if (alfa < 0) { sum_C_neg += alfa; sum_N_neg += alfa; } else { sum_C_pos += alfa; sum_N_pos += alfa; } } for (k = P_offd_i[j1]; k < P_offd_i[j1 + 1]; k++) { k1 = P_offd_j[k]; alfa = A_diag_data[j] * P_offd_data[k]; P_offd_data[C_array_offd[k1]] += alfa; if (alfa < 0) { sum_C_neg += alfa; sum_N_neg += alfa; } else { sum_C_pos += alfa; sum_N_pos += alfa; } } } else { if (CF_marker[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func[j1])) { if (A_diag_data[j] < 0) { sum_N_neg += A_diag_data[j]; } else { sum_N_pos += A_diag_data[j]; } } } } for (j = A_offd_i[i1]; j < A_offd_i[i1 + 1]; j++) { j1 = A_offd_j[j]; if (j1 > -1 && P_marker_offd[j1] == i1) { j_start = Pext_start[j1]; j_end = j_start + Pext_i[j1 + 1]; for (k = j_start; k < j_end; k++) { k1 = Pext_pass[pass][k]; alfa = A_offd_data[j] * Pext_data[k]; if (k1 < 0) { P_diag_data[C_array[-k1 - 1]] += alfa; } else { P_offd_data[C_array_offd[k1]] += alfa; } if (alfa < 0) { sum_C_neg += alfa; sum_N_neg += alfa; } else { sum_C_pos += alfa; sum_N_pos += alfa; } } } else { if (CF_marker_offd[j1] != -3 && (num_functions == 1 || dof_func_offd[j1] == dof_func[i1])) { if ( A_offd_data[j] < 0) { sum_N_neg += A_offd_data[j]; } else { sum_N_pos += A_offd_data[j]; } } } } diagonal = A_diag_data[A_diag_i[i1]]; if (sum_C_neg * diagonal != 0) { alfa = -sum_N_neg / (sum_C_neg * diagonal); } if (sum_C_pos * 
diagonal != 0) { beta = -sum_N_pos / (sum_C_pos * diagonal); } for (j = P_diag_i[i1]; j < P_diag_i[i1 + 1]; j++) if (P_diag_data[j] < 0) { P_diag_data[j] *= alfa; } else { P_diag_data[j] *= beta; } for (j = P_offd_i[i1]; j < P_offd_i[i1 + 1]; j++) if (P_offd_data[j] < 0) { P_offd_data[j] *= alfa; } else { P_offd_data[j] *= beta; } } hypre_TFree(C_array, HYPRE_MEMORY_HOST); hypre_TFree(C_array_offd, HYPRE_MEMORY_HOST); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); if (num_cols_offd) { hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } } /* End OMP Parallel Section */ hypre_TFree(P_diag_pass[pass], HYPRE_MEMORY_HOST); if (num_procs > 1) { hypre_TFree(P_offd_pass[pass], HYPRE_MEMORY_HOST); hypre_TFree(Pext_pass[pass], HYPRE_MEMORY_HOST); } } /* End num_passes for-loop */ } else /* no distinction between positive and negative offdiagonal element */ { pass_length = pass_pointer[2] - pass_pointer[1]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,k,k1,i,i1,j,j1,sum_C,sum_N,j_start,j_end,cnt,tmp_marker,tmp_marker_offd,cnt_offd,diagonal,alfa) #endif { /* Sparsity structure is now finished. Next, calculate interpolation * weights for pass one. Thread by computing the interpolation * weights only over each thread's range of rows. Rows are divided * up evenly amongst the threads. 
*/ /* Initialize thread-wise variables */ tmp_marker = NULL; if (n_fine) { tmp_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } tmp_marker_offd = NULL; if (num_cols_offd) { tmp_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } for (i = 0; i < n_fine; i++) { tmp_marker[i] = -1; } for (i = 0; i < num_cols_offd; i++) { tmp_marker_offd[i] = -1; } /* Compute this thread's range of pass_length */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); thread_start = pass_pointer[1] + (pass_length / num_threads) * my_thread_num; if (my_thread_num == num_threads - 1) { thread_stop = pass_pointer[1] + pass_length; } else { thread_stop = pass_pointer[1] + (pass_length / num_threads) * (my_thread_num + 1); } /* determine P for points of pass 1, i.e. neighbors of coarse points */ for (i = thread_start; i < thread_stop; i++) { i1 = pass_array[i]; sum_C = 0; sum_N = 0; j_start = P_diag_start[i1]; j_end = j_start + P_diag_i[i1 + 1] - P_diag_i[i1]; for (j = j_start; j < j_end; j++) { k1 = P_diag_pass[1][j]; tmp_marker[C_array[k1]] = i1; } cnt = P_diag_i[i1]; for (j = A_diag_i[i1] + 1; j < A_diag_i[i1 + 1]; j++) { j1 = A_diag_j[j]; if (CF_marker[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func[j1])) { sum_N += A_diag_data[j]; } if (j1 != -1 && tmp_marker[j1] == i1) { P_diag_data[cnt] = A_diag_data[j]; P_diag_j[cnt++] = fine_to_coarse[j1]; sum_C += A_diag_data[j]; } } j_start = P_offd_start[i1]; j_end = j_start + P_offd_i[i1 + 1] - P_offd_i[i1]; for (j = j_start; j < j_end; j++) { k1 = P_offd_pass[1][j]; tmp_marker_offd[C_array_offd[k1]] = i1; } cnt_offd = P_offd_i[i1]; for (j = A_offd_i[i1]; j < A_offd_i[i1 + 1]; j++) { j1 = A_offd_j[j]; if (CF_marker_offd[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func_offd[j1])) { sum_N += A_offd_data[j]; } if (j1 != -1 && tmp_marker_offd[j1] == i1) { P_offd_data[cnt_offd] = A_offd_data[j]; P_offd_j[cnt_offd++] = map_S_to_new[j1]; sum_C += A_offd_data[j]; } } 
diagonal = A_diag_data[A_diag_i[i1]]; if (sum_C * diagonal != 0) { alfa = -sum_N / (sum_C * diagonal); } for (j = P_diag_i[i1]; j < cnt; j++) { P_diag_data[j] *= alfa; } for (j = P_offd_i[i1]; j < cnt_offd; j++) { P_offd_data[j] *= alfa; } } hypre_TFree(tmp_marker, HYPRE_MEMORY_HOST); hypre_TFree(tmp_marker_offd, HYPRE_MEMORY_HOST); } /* end OMP parallel region */ old_Pext_send_size = 0; old_Pext_recv_size = 0; if (n_coarse) { hypre_TFree(C_array, HYPRE_MEMORY_HOST); } hypre_TFree(C_array_offd, HYPRE_MEMORY_HOST); hypre_TFree(P_diag_pass[1], HYPRE_MEMORY_HOST); if (num_procs > 1) { hypre_TFree(P_offd_pass[1], HYPRE_MEMORY_HOST); } for (pass = 2; pass < num_passes; pass++) { if (num_procs > 1) { Pext_send_size = Pext_send_map_start[pass][num_sends]; if (Pext_send_size > old_Pext_send_size) { hypre_TFree(Pext_send_data, HYPRE_MEMORY_HOST); Pext_send_data = hypre_CTAlloc(HYPRE_Real, Pext_send_size, HYPRE_MEMORY_HOST); } old_Pext_send_size = Pext_send_size; cnt_offd = 0; for (i = 0; i < num_sends; i++) { for (j = send_map_start[i]; j < send_map_start[i + 1]; j++) { j1 = send_map_elmt[j]; if (assigned[j1] == pass - 1) { j_start = P_diag_i[j1]; j_end = P_diag_i[j1 + 1]; for (k = j_start; k < j_end; k++) { Pext_send_data[cnt_offd++] = P_diag_data[k]; } j_start = P_offd_i[j1]; j_end = P_offd_i[j1 + 1]; for (k = j_start; k < j_end; k++) { Pext_send_data[cnt_offd++] = P_offd_data[k]; } } } } hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = Pext_send_map_start[pass]; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = Pext_recv_vec_start[pass]; Pext_recv_size = Pext_recv_vec_start[pass][num_recvs]; if (Pext_recv_size > old_Pext_recv_size) { hypre_TFree(Pext_data, HYPRE_MEMORY_HOST); Pext_data = hypre_CTAlloc(HYPRE_Real, Pext_recv_size, HYPRE_MEMORY_HOST); } old_Pext_recv_size = Pext_recv_size; comm_handle = hypre_ParCSRCommHandleCreate (1, tmp_comm_pkg, Pext_send_data, 
Pext_data); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(Pext_send_map_start[pass], HYPRE_MEMORY_HOST); hypre_TFree(Pext_recv_vec_start[pass], HYPRE_MEMORY_HOST); } pass_length = pass_pointer[pass + 1] - pass_pointer[pass]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,k,k1,i,i1,j,j1,sum_C,sum_N,j_start,j_end,cnt,tmp_marker,tmp_marker_offd,cnt_offd,diagonal,alfa,tmp_array,tmp_array_offd) #endif { /* Sparsity structure is now finished. Next, calculate interpolation * weights for passes >= 2. Thread by computing the interpolation * weights only over each thread's range of rows. Rows are divided * up evenly amongst the threads. */ /* Initialize thread-wise variables */ tmp_marker = NULL; if (n_fine) { tmp_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } tmp_marker_offd = NULL; if (num_cols_offd) { tmp_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } tmp_array = NULL; if (n_coarse) { tmp_array = hypre_CTAlloc(HYPRE_Int, n_coarse, HYPRE_MEMORY_HOST); } tmp_array_offd = NULL; if (new_num_cols_offd > n_coarse_offd) { tmp_array_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST); } else { tmp_array_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd, HYPRE_MEMORY_HOST);} for (i = 0; i < n_fine; i++) { tmp_marker[i] = -1; } for (i = 0; i < num_cols_offd; i++) { tmp_marker_offd[i] = -1; } /* Compute this thread's range of pass_length */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); thread_start = pass_pointer[pass] + (pass_length / num_threads) * my_thread_num; if (my_thread_num == num_threads - 1) { thread_stop = pass_pointer[pass] + pass_length; } else { thread_stop = pass_pointer[pass] + (pass_length / num_threads) * (my_thread_num + 1); } for (i = thread_start; i < thread_stop; i++) { i1 = pass_array[i]; sum_C = 0; sum_N = 0; j_start = P_diag_start[i1]; j_end = j_start + P_diag_i[i1 + 1] - P_diag_i[i1]; cnt = 
P_diag_i[i1]; for (j = j_start; j < j_end; j++) { k1 = P_diag_pass[pass][j]; tmp_array[k1] = cnt; P_diag_data[cnt] = 0; P_diag_j[cnt++] = k1; } j_start = P_offd_start[i1]; j_end = j_start + P_offd_i[i1 + 1] - P_offd_i[i1]; cnt_offd = P_offd_i[i1]; for (j = j_start; j < j_end; j++) { k1 = P_offd_pass[pass][j]; tmp_array_offd[k1] = cnt_offd; P_offd_data[cnt_offd] = 0; P_offd_j[cnt_offd++] = k1; } for (j = S_diag_i[i1]; j < S_diag_i[i1 + 1]; j++) { j1 = S_diag_j[j]; if (assigned[j1] == pass - 1) { tmp_marker[j1] = i1; } } for (j = S_offd_i[i1]; j < S_offd_i[i1 + 1]; j++) { j1 = S_offd_j[j]; if (assigned_offd[j1] == pass - 1) { tmp_marker_offd[j1] = i1; } } for (j = A_diag_i[i1] + 1; j < A_diag_i[i1 + 1]; j++) { j1 = A_diag_j[j]; if (tmp_marker[j1] == i1) { for (k = P_diag_i[j1]; k < P_diag_i[j1 + 1]; k++) { k1 = P_diag_j[k]; alfa = A_diag_data[j] * P_diag_data[k]; P_diag_data[tmp_array[k1]] += alfa; sum_C += alfa; sum_N += alfa; } for (k = P_offd_i[j1]; k < P_offd_i[j1 + 1]; k++) { k1 = P_offd_j[k]; alfa = A_diag_data[j] * P_offd_data[k]; P_offd_data[tmp_array_offd[k1]] += alfa; sum_C += alfa; sum_N += alfa; } } else { if (CF_marker[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func[j1])) { sum_N += A_diag_data[j]; } } } for (j = A_offd_i[i1]; j < A_offd_i[i1 + 1]; j++) { j1 = A_offd_j[j]; if (j1 > -1 && tmp_marker_offd[j1] == i1) { j_start = Pext_start[j1]; j_end = j_start + Pext_i[j1 + 1]; for (k = j_start; k < j_end; k++) { k1 = Pext_pass[pass][k]; alfa = A_offd_data[j] * Pext_data[k]; if (k1 < 0) { P_diag_data[tmp_array[-k1 - 1]] += alfa; } else { P_offd_data[tmp_array_offd[k1]] += alfa; } sum_C += alfa; sum_N += alfa; } } else { if (CF_marker_offd[j1] != -3 && (num_functions == 1 || dof_func_offd[j1] == dof_func[i1])) { sum_N += A_offd_data[j]; } } } diagonal = A_diag_data[A_diag_i[i1]]; if (sum_C * diagonal != 0.0) { alfa = -sum_N / (sum_C * diagonal); } for (j = P_diag_i[i1]; j < P_diag_i[i1 + 1]; j++) { P_diag_data[j] *= alfa; } for (j = 
P_offd_i[i1]; j < P_offd_i[i1 + 1]; j++) { P_offd_data[j] *= alfa; } } hypre_TFree(tmp_marker, HYPRE_MEMORY_HOST); hypre_TFree(tmp_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(tmp_array, HYPRE_MEMORY_HOST); hypre_TFree(tmp_array_offd, HYPRE_MEMORY_HOST); } /* End OMP Parallel Section */ hypre_TFree(P_diag_pass[pass], HYPRE_MEMORY_HOST); if (num_procs > 1) { hypre_TFree(P_offd_pass[pass], HYPRE_MEMORY_HOST); hypre_TFree(Pext_pass[pass], HYPRE_MEMORY_HOST); } } } hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(Pext_send_map_start, HYPRE_MEMORY_HOST); hypre_TFree(Pext_recv_vec_start, HYPRE_MEMORY_HOST); hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_TFree(Pext_send_data, HYPRE_MEMORY_HOST); hypre_TFree(Pext_data, HYPRE_MEMORY_HOST); hypre_TFree(P_diag_pass, HYPRE_MEMORY_HOST); hypre_TFree(P_offd_pass, HYPRE_MEMORY_HOST); hypre_TFree(Pext_pass, HYPRE_MEMORY_HOST); hypre_TFree(P_diag_start, HYPRE_MEMORY_HOST); hypre_TFree(P_offd_start, HYPRE_MEMORY_HOST); hypre_TFree(Pext_start, HYPRE_MEMORY_HOST); hypre_TFree(Pext_i, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(assigned, HYPRE_MEMORY_HOST); hypre_TFree(assigned_offd, HYPRE_MEMORY_HOST); hypre_TFree(pass_pointer, HYPRE_MEMORY_HOST); hypre_TFree(pass_array, HYPRE_MEMORY_HOST); hypre_TFree(map_S_to_new, HYPRE_MEMORY_HOST); if (num_procs > 1) { hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST); } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; /* Compress P, removing coefficients smaller than trunc_factor * Max and/or keep yat most <P_max_elmts> 
per row absolutely maximal coefficients */ if (trunc_factor != 0.0 || P_max_elmts != 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, P_max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); } P_offd_size = P_offd_i[n_fine]; num_cols_offd_P = 0; if (P_offd_size) { if (new_num_cols_offd > num_cols_offd) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST); } else { P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < new_num_cols_offd; i++) { P_marker_offd[i] = 0; } num_cols_offd_P = 0; for (i = 0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker_offd[index]) { num_cols_offd_P++; P_marker_offd[index] = 1; } } col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_P, HYPRE_MEMORY_HOST); permute = hypre_CTAlloc(HYPRE_Int, new_counter[num_passes - 1], HYPRE_MEMORY_HOST); big_permute = hypre_CTAlloc(HYPRE_BigInt, new_counter[num_passes - 1], HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < new_counter[num_passes - 1]; i++) { big_permute[i] = -1; } cnt = 0; for (i = 0; i < num_passes - 1; i++) { for (j = new_counter[i]; j < new_counter[i + 1]; j++) { if (P_marker_offd[j]) { col_map_offd_P[cnt] = new_elmts[i][j - (HYPRE_BigInt)new_counter[i]]; big_permute[j] = col_map_offd_P[cnt++]; } } } hypre_BigQsort0(col_map_offd_P, 0, num_cols_offd_P - 1); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,big_k1) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < new_counter[num_passes - 1]; i++) { big_k1 = big_permute[i]; if (big_k1 != -1) { permute[i] = hypre_BigBinarySearch(col_map_offd_P, big_k1, num_cols_offd_P); } } #ifdef HYPRE_USING_OPENMP 
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < P_offd_size; i++) { P_offd_j[i] = permute[P_offd_j[i]]; } hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } if (num_procs > 1) { for (i = 0; i < num_passes - 1; i++) { hypre_TFree(new_elmts[i], HYPRE_MEMORY_HOST); } } hypre_TFree(permute, HYPRE_MEMORY_HOST); hypre_TFree(big_permute, HYPRE_MEMORY_HOST); hypre_TFree(new_elmts, HYPRE_MEMORY_HOST); hypre_TFree(new_counter, HYPRE_MEMORY_HOST); if (num_cols_offd_P) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_offd_P; } if (n_SF) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n_fine; i++) if (CF_marker[i] == -3) { CF_marker[i] = -1; } } if (num_procs > 1) { hypre_MatvecCommPkgCreate(P); } *P_ptr = P; /* wall_time = hypre_MPI_Wtime() - wall_time; hypre_printf("TOTAL TIME %1.2e \n",wall_time); */ /*----------------------------------------------------------------------- * Build and return dof_func array for coarse grid. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Free mapping vector and marker array. 
 *-----------------------------------------------------------------------*/

#ifdef HYPRE_PROFILE
   /* Accumulate elapsed wall time for the multipass-interpolation timer. */
   hypre_profile_times[HYPRE_TIMER_ID_MULTIPASS_INTERP] += hypre_MPI_Wtime();
#endif

   return (0);
}

/*
 * Driver for multipass interpolation: dispatches to the device ("mod")
 * implementation when both A and S live in device memory (CUDA/HIP builds),
 * otherwise falls back to the host implementation above.
 *
 * A              - fine-grid matrix
 * CF_marker      - coarse/fine point marker array
 * S              - strength-of-connection matrix
 * num_cpts_global- global coarse-point partitioning
 * trunc_factor / P_max_elmts - interpolation truncation controls
 * P_ptr          - output interpolation matrix
 * Returns the error code of the chosen implementation.
 */
HYPRE_Int
hypre_BoomerAMGBuildMultipass( hypre_ParCSRMatrix  *A,
                               HYPRE_Int           *CF_marker,
                               hypre_ParCSRMatrix  *S,
                               HYPRE_BigInt        *num_cpts_global,
                               HYPRE_Int            num_functions,
                               HYPRE_Int           *dof_func,
                               HYPRE_Int            debug_flag,
                               HYPRE_Real           trunc_factor,
                               HYPRE_Int            P_max_elmts,
                               HYPRE_Int            weight_option,
                               hypre_ParCSRMatrix **P_ptr )
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("MultipassInterp");
#endif

   HYPRE_Int ierr = 0;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* Decide execution policy from where A and S are stored. */
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A),
                                                      hypre_ParCSRMatrixMemoryLocation(S) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      /* Notice: call the mod version on GPUs */
      ierr = hypre_BoomerAMGBuildModMultipassDevice( A, CF_marker, S, num_cpts_global,
                                                     trunc_factor, P_max_elmts, 9,
                                                     num_functions, dof_func,
                                                     P_ptr );
   }
   else
#endif
   {
      ierr = hypre_BoomerAMGBuildMultipassHost( A, CF_marker, S, num_cpts_global,
                                                num_functions, dof_func, debug_flag,
                                                trunc_factor, P_max_elmts, weight_option,
                                                P_ptr );
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPopRange();
#endif

   return ierr;
}
grid_ao.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
  
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
  
       http://www.apache.org/licenses/LICENSE-2.0
  
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
 *
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 */

#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <complex.h>
#include "config.h"
#include "cint.h"
#include "vhf/fblas.h"
#include "gto/grid_ao_drv.h"

#define MIN(X,Y)        ((X)<(Y)?(X):(Y))
#define MAX(X,Y)        ((X)>(Y)?(X):(Y))
/* Sentinel in non0table meaning "all lattice images contribute". */
#define ALL_IMAGES      255
/* Number of lattice images processed per dgemm batch. */
#define IMGBLK          40
/* Real doubles per complex value. */
#define OF_CMPLX        2

double CINTcommon_fac_sp(int l);
void GTOshell_eval_grid_cart(double *gto, double *ri, double *exps,
                double *coord, double *alpha, double *coeff, double *env,
                int l, int np, int nc, size_t nao, size_t ngrids, size_t bgrids);
void GTOshell_eval_grid_cart_deriv1(double *gto, double *ri, double *exps,
                double *coord, double *alpha, double *coeff, double *env,
                int l, int np, int nc, size_t nao, size_t ngrids, size_t bgrids);
void GTOshell_eval_grid_cart_deriv2(double *cgto, double *ri, double *exps,
                double *coord, double *alpha, double *coeff, double *env,
                int l, int np, int nc, size_t nao, size_t ngrids, size_t bgrids);
void GTOshell_eval_grid_cart_deriv3(double *cgto, double *ri, double *exps,
                double *coord, double *alpha, double *coeff, double *env,
                int l, int np, int nc, size_t nao, size_t ngrids, size_t bgrids);
void GTOshell_eval_grid_cart_deriv4(double *cgto, double *ri, double *exps,
                double *coord, double *alpha, double *coeff, double *env,
                int l, int np, int nc, size_t nao, size_t ngrids, size_t bgrids);
void GTOshell_eval_grid_cart(double *gto, double *ri, double *exps,
                double *coord, double *alpha, double *coeff, double *env,
                int l, int np, int nc, size_t nao, size_t ngrids, size_t bgrids);
void GTOshell_eval_grid_ip_cart(double *gto, double *ri, double *exps,
                double *coord, double *alpha, double *coeff, double *env,
                int l, int np, int nc, size_t nao, size_t ngrids, size_t bgrids);

/*
 * Extend the meaning of non0table: given shell ID and block ID,
 * non0table is the number of images in Ls that does not vanish.
 * Ls should be sorted based on the distance to center cell.
 */
void PBCnr_ao_screen(unsigned char *non0table, double *coords, int ngrids,
                     double *Ls, int nimgs,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
#pragma omp parallel default(none) \
        shared(Ls, nimgs, coords, ngrids, non0table, atm, natm, bas, nbas, env)
{
        int i, j, m;
        int np, nc, atm_id;
        size_t bas_id, ib;
        double rr, arr, maxc;
        double logcoeff[NPRIMAX];
        double dr[3];
        double rL[3];
        double *p_exp, *pcoeff, *ratm;
#pragma omp for nowait schedule(dynamic)
        for (bas_id = 0; bas_id < nbas; bas_id++) {
                np = bas[NPRIM_OF+bas_id*BAS_SLOTS];
                nc = bas[NCTR_OF +bas_id*BAS_SLOTS];
                p_exp = env + bas[PTR_EXP+bas_id*BAS_SLOTS];
                pcoeff = env + bas[PTR_COEFF+bas_id*BAS_SLOTS];
                atm_id = bas[ATOM_OF+bas_id*BAS_SLOTS];
                ratm = env + atm[atm_id*ATM_SLOTS+PTR_COORD];

                /* log of the largest contraction coefficient per primitive,
                 * used in the exponential-decay screening test below. */
                for (j = 0; j < np; j++) {
                        maxc = 0;
                        for (i = 0; i < nc; i++) {
                                maxc = MAX(maxc, fabs(pcoeff[i*np+j]));
                        }
                        logcoeff[j] = log(maxc);
                }

                for (ib = 0; ib < nblk; ib++) {
                /* Scan images from farthest to nearest; the first image whose
                 * Gaussian survives the cutoff bounds the contributing set. */
                for (m = nimgs-1; m >= 0; m--) {
                        rL[0] = ratm[0] + Ls[m*3+0];
                        rL[1] = ratm[1] + Ls[m*3+1];
                        rL[2] = ratm[2] + Ls[m*3+2];
                        for (i = ib*BLKSIZE; i < MIN(ngrids, (ib+1)*BLKSIZE); i++) {
                                dr[0] = coords[0*ngrids+i] - rL[0];
                                dr[1] = coords[1*ngrids+i] - rL[1];
                                dr[2] = coords[2*ngrids+i] - rL[2];
                                rr = dr[0]*dr[0] + dr[1]*dr[1] + dr[2]*dr[2];
                                for (j = 0; j < np; j++) {
                                        arr = p_exp[j] * rr;
                                        if (arr-logcoeff[j] < EXPCUTOFF) {
                                                /* counts saturate at ALL_IMAGES */
                                                non0table[ib*nbas+bas_id] = MIN(ALL_IMAGES, m+1);
                                                goto next_blk;
                                        }
                                }
                        }
                }
                /* no image contributes to this block */
                non0table[ib*nbas+bas_id] = 0;
next_blk:;
                }
        }
}
}

/* Repack the real/imag-split k-point AO buffer ao_k into the complex output
 * array `out`; ao_k stores, per k-point, a real block followed by an
 * imaginary block of ncomp*ncol*bgrids doubles each. */
static void _copy(double complex *out, double *ao_k,
                  size_t ngrids, size_t bgrids,
                  int nkpts, int ncomp, int nao, int ncol)
{
        int i, j, k, ic;
        double complex *pout;
        double *ao_r, *ao_i;
        int blksize = ncomp * ncol * bgrids;
        for (k = 0; k < nkpts; k++) {
                ao_r = ao_k + k*2    * blksize;
                ao_i = ao_k +(k*2+1) * blksize;
                for (ic = 0; ic < ncomp; ic++) {
                        pout = out + (k * ncomp + ic) * nao * ngrids;
                        for (j = 0; j < ncol; j++) {
                        for (i = 0; i < bgrids; i++) {
                                pout[j*ngrids+i] = (ao_r[j*bgrids+i] +
                                                    ao_i[j*bgrids+i]*_Complex_I);
                        } }
                        ao_r += ncol * bgrids;
                        ao_i += ncol * bgrids;
                }
        }
}

// grid2atm[nimgs,xyz,grid_id]
/* Fill grid->atom displacement vectors for every lattice image of one atom
 * and record, per image, the minimum grid-atom distance (min_grid2atm).
 * Images beyond atm_imag_max are skipped (their slots left untouched). */
static void _fill_grid2atm(double *grid2atm, double *min_grid2atm,
                           double *coord, double *Ls, double *r_atm,
                           int atm_imag_max,
                           size_t bgrids, size_t ngrids, int nimgs)
{
        int ig, m;
        double rL[3];
        double dist;
        double dist_min;
        for (m = 0; m < nimgs; m++) {
                if ((m < atm_imag_max || atm_imag_max == ALL_IMAGES)) {
                        rL[0] = r_atm[0] + Ls[m*3+0];
                        rL[1] = r_atm[1] + Ls[m*3+1];
                        rL[2] = r_atm[2] + Ls[m*3+2];
                        dist_min = 1e9;
                        for (ig = 0; ig < bgrids; ig++) {
                                grid2atm[0*BLKSIZE+ig] = coord[0*ngrids+ig] - rL[0];
                                grid2atm[1*BLKSIZE+ig] = coord[1*ngrids+ig] - rL[1];
                                grid2atm[2*BLKSIZE+ig] = coord[2*ngrids+ig] - rL[2];
                                dist = (grid2atm[0*BLKSIZE+ig]*grid2atm[0*BLKSIZE+ig] +
                                        grid2atm[1*BLKSIZE+ig]*grid2atm[1*BLKSIZE+ig] +
                                        grid2atm[2*BLKSIZE+ig]*grid2atm[2*BLKSIZE+ig]);
                                dist_min = MIN(dist, dist_min);
                        }
                        min_grid2atm[m] = sqrt(dist_min);
                }
                grid2atm += 3*BLKSIZE;
        }
}

/* Evaluate Cartesian AO values on one block of grid points for the shells in
 * shls_slice.  For each shell, contributions from batches of IMGBLK lattice
 * images are accumulated into per-k-point buffers via a single dgemm with
 * the phase factors expLk, then copied into the complex output `ao`. */
void PBCeval_cart_iter(FPtr_eval feval, FPtr_exp fexp,
                       size_t nao, size_t ngrids, size_t bgrids, size_t offao,
                       int param[], int *shls_slice, int *ao_loc, double *buf,
                       double *Ls, double complex *expLk,
                       int nimgs, int nkpts, int di_max,
                       double complex *ao, double *coord,
                       double *rcut, unsigned char *non0table,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ncomp = param[TENSOR];
        const int sh0 = shls_slice[0];
        const int sh1 = shls_slice[1];
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        const int nkpts2 = nkpts * OF_CMPLX;
        int i, j, k, l, np, nc, atm_id, bas_id, deg, ao_id;
        int iL, iL0, iLcount, dimc;
        int grid2atm_atm_id, count;
        double fac;
        double *p_exp, *pcoeff, *pcoord, *pao, *ri;
        /* Carve work areas out of the caller-provided scratch buffer; sizes
         * must agree with the bufsize computation in PBCeval_loop. */
        double *grid2atm = buf; // shape [nimgs,3,bgrids]
        double *eprim = grid2atm + nimgs*3*BLKSIZE;
        double *aobuf = eprim + NPRIMAX*BLKSIZE*2;
        double *aobufk = aobuf + IMGBLK*ncomp*di_max*bgrids;
        double *Lk_buf = aobufk + nkpts*ncomp*di_max*bgrids * OF_CMPLX;
        double complex *zLk_buf = (double complex *)Lk_buf;
        double *min_grid2atm = Lk_buf + IMGBLK * nkpts * OF_CMPLX;
        double *pexpLk;
        int img_idx[nimgs];
        int atm_imag_max[natm];
        for (i = 0; i < natm; i++) {
                atm_imag_max[i] = 0;
        }
        /* Per atom, the largest image count over its shells. */
        for (bas_id = sh0; bas_id < sh1; bas_id++) {
                atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
                atm_imag_max[atm_id] = MAX(atm_imag_max[atm_id], non0table[bas_id]);
        }

        grid2atm_atm_id = -1;
        for (bas_id = sh0; bas_id < sh1; bas_id++) {
                np = bas[bas_id*BAS_SLOTS+NPRIM_OF];
                nc = bas[bas_id*BAS_SLOTS+NCTR_OF ];
                l  = bas[bas_id*BAS_SLOTS+ANG_OF  ];
                deg = (l+1)*(l+2)/2;
                dimc = nc*deg * ncomp * bgrids;
                fac = CINTcommon_fac_sp(l);
                p_exp  = env + bas[bas_id*BAS_SLOTS+PTR_EXP];
                pcoeff = env + bas[bas_id*BAS_SLOTS+PTR_COEFF];
                atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
                ri = env + atm[PTR_COORD+atm_id*ATM_SLOTS];
                ao_id = ao_loc[bas_id] - ao_loc[sh0];
                /* Displacements are per atom; reuse them across shells on
                 * the same center. */
                if (grid2atm_atm_id != atm_id) {
                        _fill_grid2atm(grid2atm, min_grid2atm, coord, Ls, ri,
                                       atm_imag_max[atm_id],
                                       bgrids, ngrids, nimgs);
                        grid2atm_atm_id = atm_id;
                }

                for (i = 0; i < nkpts2*dimc; i++) {
                        aobufk[i] = 0;
                }
                for (iL0 = 0; iL0 < nimgs; iL0+=IMGBLK) {
                        iLcount = MIN(IMGBLK, nimgs - iL0);
                        count = 0;
                        for (iL = iL0; iL < iL0+iLcount; iL++) {
                                pcoord = grid2atm + iL * 3*BLKSIZE;
                                /* screen by image count, distance cutoff and
                                 * the primitive-exponential magnitude */
                                if ((iL < non0table[bas_id] || non0table[bas_id] == ALL_IMAGES) &&
                                    (min_grid2atm[iL] < rcut[bas_id]) &&
                                    (*fexp)(eprim, pcoord, p_exp, pcoeff, l, np, nc, bgrids, fac)) {
                                        pao = aobuf + count * dimc;
                                        (*feval)(pao, ri, eprim, pcoord, p_exp, pcoeff, env,
                                                 l, np, nc, nc*deg, bgrids, bgrids);
                                        img_idx[count] = iL;
                                        count += 1;
                                }
                        }
                        if (count > 0) {
                                if (img_idx[count-1] != iL0 + count-1) {
                                        // some images are skipped
                                        /* gather the phase factors of the
                                         * surviving images */
                                        for (i = 0; i < count; i++) {
                                                j = img_idx[i];
                                                for (k = 0; k < nkpts; k++) {
                                                        zLk_buf[i*nkpts+k] = expLk[j*nkpts+k];
                                                }
                                        }
                                        pexpLk = Lk_buf;
                                } else {
                                        pexpLk = (double *)(expLk + nkpts * iL0);
                                }
                                /* aobufk += aobuf . expLk^T : sums images with
                                 * their k-point phases in one call */
                                dgemm_(&TRANS_N, &TRANS_T, &dimc, &nkpts2, &count,
                                       &D1, aobuf, &dimc, pexpLk, &nkpts2,
                                       &D1, aobufk, &dimc);
                        }
                }
                _copy(ao+ao_id*ngrids+offao, aobufk,
                      ngrids, bgrids, nkpts, ncomp, nao, nc*deg);
        }
}

/* Spherical-harmonic counterpart of PBCeval_cart_iter: evaluates in the
 * Cartesian basis, then transforms to spherical via CINTc2s_ket_sph1 for
 * l > 1 (s and p shells are identical in both bases up to normalization). */
void PBCeval_sph_iter(FPtr_eval feval, FPtr_exp fexp,
                      size_t nao, size_t ngrids, size_t bgrids, size_t offao,
                      int param[], int *shls_slice, int *ao_loc, double *buf,
                      double *Ls, double complex *expLk,
                      int nimgs, int nkpts, int di_max,
                      double complex *ao, double *coord,
                      double *rcut, unsigned char *non0table,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ncomp = param[TENSOR];
        const int sh0 = shls_slice[0];
        const int sh1 = shls_slice[1];
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        const int nkpts2 = nkpts * OF_CMPLX;
        int i, j, k, l, np, nc, atm_id, bas_id, deg, dcart, ao_id;
        int iL, iL0, iLcount, dimc;
        int grid2atm_atm_id, count;
        double fac;
        double *p_exp, *pcoeff, *pcoord, *pcart, *pao, *ri;
        /* Scratch layout mirrors PBCeval_cart_iter, with an extra Cartesian
         * staging area cart_gto for the c2s transformation. */
        double *grid2atm = buf; // shape [nimgs,3,bgrids]
        double *eprim = grid2atm + nimgs*3*BLKSIZE;
        double *aobuf = eprim + NPRIMAX*BLKSIZE*2;
        double *aobufk = aobuf + IMGBLK*ncomp*di_max*bgrids;
        double *Lk_buf = aobufk + nkpts*ncomp*di_max*bgrids * OF_CMPLX;
        double complex *zLk_buf = (double complex *)Lk_buf;
        double *cart_gto = Lk_buf + IMGBLK * nkpts * OF_CMPLX;
        double *min_grid2atm = cart_gto + ncomp*NCTR_CART*bgrids;
        double *pexpLk;
        int img_idx[nimgs];
        int atm_imag_max[natm];
        for (i = 0; i < natm; i++) {
                atm_imag_max[i] = 0;
        }
        for (bas_id = sh0; bas_id < sh1; bas_id++) {
                atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
                atm_imag_max[atm_id] = MAX(atm_imag_max[atm_id], non0table[bas_id]);
        }

        grid2atm_atm_id = -1;
        for (bas_id = sh0; bas_id < sh1; bas_id++) {
                np = bas[bas_id*BAS_SLOTS+NPRIM_OF];
                nc = bas[bas_id*BAS_SLOTS+NCTR_OF ];
                l  = bas[bas_id*BAS_SLOTS+ANG_OF  ];
                deg = l * 2 + 1;
                dcart = (l+1)*(l+2)/2;
                dimc = nc*deg * ncomp * bgrids;
                fac = CINTcommon_fac_sp(l);
                p_exp  = env + bas[bas_id*BAS_SLOTS+PTR_EXP];
                pcoeff = env + bas[bas_id*BAS_SLOTS+PTR_COEFF];
                atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
                ri = env + atm[PTR_COORD+atm_id*ATM_SLOTS];
                ao_id = ao_loc[bas_id] - ao_loc[sh0];
                if (grid2atm_atm_id != atm_id) {
                        _fill_grid2atm(grid2atm, min_grid2atm, coord, Ls, ri,
                                       atm_imag_max[atm_id],
                                       bgrids, ngrids, nimgs);
                        grid2atm_atm_id = atm_id;
                }

                for (i = 0; i < nkpts2*dimc; i++) {
                        aobufk[i] = 0;
                }
                for (iL0 = 0; iL0 < nimgs; iL0+=IMGBLK) {
                        iLcount = MIN(IMGBLK, nimgs - iL0);
                        count = 0;
                        for (iL = iL0; iL < iL0+iLcount; iL++) {
                                pcoord = grid2atm + iL * 3*BLKSIZE;
                                if ((iL < non0table[bas_id] || non0table[bas_id] == ALL_IMAGES) &&
                                    (min_grid2atm[iL] < rcut[bas_id]) &&
                                    (*fexp)(eprim, pcoord, p_exp, pcoeff, l, np, nc, bgrids, fac)) {
                                        pao = aobuf + count * dimc;
                                        if (l <= 1) { // s, p functions
                                                (*feval)(pao, ri, eprim, pcoord, p_exp, pcoeff, env,
                                                         l, np, nc, nc*dcart, bgrids, bgrids);
                                        } else {
                                                (*feval)(cart_gto, ri, eprim, pcoord, p_exp, pcoeff, env,
                                                         l, np, nc, nc*dcart, bgrids, bgrids);
                                                /* Cartesian -> spherical for each
                                                 * component/contraction block */
                                                pcart = cart_gto;
                                                for (i = 0; i < ncomp * nc; i++) {
                                                        CINTc2s_ket_sph1(pao, pcart, bgrids, bgrids, l);
                                                        pao += deg * bgrids;
                                                        pcart += dcart * bgrids;
                                                }
                                        }
                                        img_idx[count] = iL;
                                        count++;
                                }
                        }
                        if (count > 0) {
                                if (img_idx[count-1] != iL0 + count-1) {
                                        // some images are skipped
                                        for (i = 0; i < count; i++) {
                                                j = img_idx[i];
                                                for (k = 0; k < nkpts; k++) {
                                                        zLk_buf[i*nkpts+k] = expLk[j*nkpts+k];
                                                }
                                        }
                                        pexpLk = Lk_buf;
                                } else {
                                        pexpLk = (double *)(expLk + nkpts * iL0);
                                }
                                dgemm_(&TRANS_N, &TRANS_T, &dimc, &nkpts2, &count,
                                       &D1, aobuf, &dimc, pexpLk, &nkpts2,
                                       &D1, aobufk, &dimc);
                        }
                }
                _copy(ao+ao_id*ngrids+offao, aobufk,
                      ngrids, bgrids, nkpts, ncomp, nao, nc*deg);
        }
}

int GTOshloc_by_atom(int *shloc, int *shls_slice, int *ao_loc,
                     int *atm, int *bas);
/*
 * blksize <= 1024 to avoid stack overflow
 *
 * non0table[ngrids/blksize,natm] is the T/F table for ao values to
 * screen the ao evaluation for each shell
 */
/* Parallel driver: splits the grid into BLKSIZE blocks and the shells into
 * per-atom groups, then dispatches (shell-group, grid-block) tasks to the
 * supplied iterator (PBCeval_cart_iter or PBCeval_sph_iter). */
void PBCeval_loop(void (*fiter)(), FPtr_eval feval, FPtr_exp fexp,
                  int ngrids, int param[], int *shls_slice, int *ao_loc,
                  double *Ls, int nimgs, double complex *expLk, int nkpts,
                  double complex *ao, double *coord,
                  double *rcut, unsigned char *non0table,
                  int *atm, int natm, int *bas, int nbas, double *env)
{
        int shloc[shls_slice[1]-shls_slice[0]+1];
        const int nshblk = GTOshloc_by_atom(shloc, shls_slice, ao_loc, atm, bas);
        const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
        const size_t Ngrids = ngrids;
        int i;
        int di_max = 0;
        for (i = shls_slice[0]; i < shls_slice[1]; i++) {
                di_max = MAX(di_max, ao_loc[i+1] - ao_loc[i]);
        }

#pragma omp parallel default(none) \
        shared(fiter, feval, fexp, param, ngrids, \
               Ls, nimgs, di_max, expLk, nkpts, shls_slice, ao_loc, \
               ao, coord, rcut, non0table, atm, natm, bas, nbas, env, shloc)
{
        const int sh0 = shls_slice[0];
        const int sh1 = shls_slice[1];
        const size_t nao = ao_loc[sh1] - ao_loc[sh0];
        int ip, ib, k, iloc, ish;
        size_t aoff, bgrids;
        /* Per-thread scratch; layout consumed by the iter functions. */
        size_t bufsize =((nimgs*3 + NPRIMAX*2 +
                          nkpts *param[POS_E1]*param[TENSOR]*di_max * OF_CMPLX +
                          IMGBLK*param[POS_E1]*param[TENSOR]*di_max +
                          param[POS_E1]*param[TENSOR]*NCTR_CART) * BLKSIZE
                         + nkpts * IMGBLK * OF_CMPLX
                         + nimgs);
        double *buf = malloc(sizeof(double) * bufsize);
#pragma omp for nowait schedule(dynamic, 1)
        for (k = 0; k < nblk*nshblk; k++) {
                iloc = k / nblk;
                ish = shloc[iloc];
                ib = k - iloc * nblk;
                ip = ib * BLKSIZE;
                aoff = (ao_loc[ish] - ao_loc[sh0]) * Ngrids + ip;
                bgrids = MIN(ngrids-ip, BLKSIZE);
                (*fiter)(feval, fexp,
                         nao, Ngrids, bgrids, aoff, param, shloc+iloc, ao_loc, buf,
                         Ls, expLk, nimgs, nkpts, di_max,
                         ao, coord+ip, rcut, non0table+ib*nbas,
                         atm, natm, bas, nbas, env);
        }
        free(buf);
}
}

/* Driver for Cartesian AOs. */
void PBCeval_cart_drv(FPtr_eval feval, FPtr_exp fexp,
                      int ngrids, int param[], int *shls_slice, int *ao_loc,
                      double *Ls, int nimgs, double complex *expLk, int nkpts,
                      double complex *ao, double *coord,
                      double *rcut, unsigned char *non0table,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        PBCeval_loop(PBCeval_cart_iter, feval, fexp,
                     ngrids, param, shls_slice, ao_loc,
                     Ls, nimgs, expLk, nkpts,
                     ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Driver for spherical AOs. */
void PBCeval_sph_drv(FPtr_eval feval, FPtr_exp fexp,
                     int ngrids, int param[], int *shls_slice, int *ao_loc,
                     double *Ls, int nimgs, double complex *expLk, int nkpts,
                     double complex *ao, double *coord,
                     double *rcut, unsigned char *non0table,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        PBCeval_loop(PBCeval_sph_iter, feval, fexp,
                     ngrids, param, shls_slice, ao_loc,
                     Ls, nimgs, expLk, nkpts,
                     ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* AO values (no derivatives), Cartesian; param = {1 component, 1 tensor}. */
void PBCGTOval_cart_deriv0(int ngrids, int *shls_slice, int *ao_loc,
                           double *Ls, int nimgs,
                           double complex *expLk, int nkpts,
                           double complex *ao, double *coord,
                           double *rcut, unsigned char *non0table,
                           int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 1};
        PBCeval_cart_drv(GTOshell_eval_grid_cart, GTOcontract_exp0,
                         ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                         ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* AO values (no derivatives), spherical. */
void PBCGTOval_sph_deriv0(int ngrids, int *shls_slice, int *ao_loc,
                          double *Ls, int nimgs,
                          double complex *expLk, int nkpts,
                          double complex *ao, double *coord,
                          double *rcut, unsigned char *non0table,
                          int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 1};
        PBCeval_sph_drv(GTOshell_eval_grid_cart, GTOcontract_exp0,
                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                        ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Value + 1st derivatives (4 tensor components), Cartesian. */
void PBCGTOval_cart_deriv1(int ngrids, int *shls_slice, int *ao_loc,
                           double *Ls, int nimgs,
                           double complex *expLk, int nkpts,
                           double complex *ao, double *coord,
                           double *rcut, unsigned char *non0table,
                           int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 4};
        PBCeval_cart_drv(GTOshell_eval_grid_cart_deriv1, GTOcontract_exp1,
                         ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                         ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Value + 1st derivatives, spherical. */
void PBCGTOval_sph_deriv1(int ngrids, int *shls_slice, int *ao_loc,
                          double *Ls, int nimgs,
                          double complex *expLk, int nkpts,
                          double complex *ao, double *coord,
                          double *rcut, unsigned char *non0table,
                          int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 4};
        PBCeval_sph_drv(GTOshell_eval_grid_cart_deriv1, GTOcontract_exp1,
                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                        ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Up to 2nd derivatives (10 components), Cartesian. */
void PBCGTOval_cart_deriv2(int ngrids, int *shls_slice, int *ao_loc,
                           double *Ls, int nimgs,
                           double complex *expLk, int nkpts,
                           double complex *ao, double *coord,
                           double *rcut, unsigned char *non0table,
                           int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 10};
        PBCeval_cart_drv(GTOshell_eval_grid_cart_deriv2, GTOprim_exp,
                         ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                         ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Up to 2nd derivatives, spherical. */
void PBCGTOval_sph_deriv2(int ngrids, int *shls_slice, int *ao_loc,
                          double *Ls, int nimgs,
                          double complex *expLk, int nkpts,
                          double complex *ao, double *coord,
                          double *rcut, unsigned char *non0table,
                          int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 10};
        PBCeval_sph_drv(GTOshell_eval_grid_cart_deriv2, GTOprim_exp,
                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                        ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Up to 3rd derivatives (20 components), Cartesian. */
void PBCGTOval_cart_deriv3(int ngrids, int *shls_slice, int *ao_loc,
                           double *Ls, int nimgs,
                           double complex *expLk, int nkpts,
                           double complex *ao, double *coord,
                           double *rcut, unsigned char *non0table,
                           int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 20};
        PBCeval_cart_drv(GTOshell_eval_grid_cart_deriv3, GTOprim_exp,
                         ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                         ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Up to 3rd derivatives, spherical. */
void PBCGTOval_sph_deriv3(int ngrids, int *shls_slice, int *ao_loc,
                          double *Ls, int nimgs,
                          double complex *expLk, int nkpts,
                          double complex *ao, double *coord,
                          double *rcut, unsigned char *non0table,
                          int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 20};
        PBCeval_sph_drv(GTOshell_eval_grid_cart_deriv3, GTOprim_exp,
                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                        ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Up to 4th derivatives (35 components), Cartesian. */
void PBCGTOval_cart_deriv4(int ngrids, int *shls_slice, int *ao_loc,
                           double *Ls, int nimgs,
                           double complex *expLk, int nkpts,
                           double complex *ao, double *coord,
                           double *rcut, unsigned char *non0table,
                           int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 35};
        PBCeval_cart_drv(GTOshell_eval_grid_cart_deriv4, GTOprim_exp,
                         ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                         ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Up to 4th derivatives, spherical. */
void PBCGTOval_sph_deriv4(int ngrids, int *shls_slice, int *ao_loc,
                          double *Ls, int nimgs,
                          double complex *expLk, int nkpts,
                          double complex *ao, double *coord,
                          double *rcut, unsigned char *non0table,
                          int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 35};
        PBCeval_sph_drv(GTOshell_eval_grid_cart_deriv4, GTOprim_exp,
                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                        ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Alias for deriv0, Cartesian. */
void PBCGTOval_cart(int ngrids, int *shls_slice, int *ao_loc,
                    double *Ls, int nimgs,
                    double complex *expLk, int nkpts,
                    double complex *ao, double *coord,
                    double *rcut, unsigned char *non0table,
                    int *atm, int natm, int *bas, int nbas, double *env)
{
//        int param[] = {1, 1};
//        PBCeval_cart_drv(GTOshell_eval_grid_cart, GTOcontract_exp0,
//                         ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
//                         ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
        PBCGTOval_cart_deriv0(ngrids, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                              ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Alias for deriv0, spherical. */
void PBCGTOval_sph(int ngrids, int *shls_slice, int *ao_loc,
                   double *Ls, int nimgs,
                   double complex *expLk, int nkpts,
                   double complex *ao, double *coord,
                   double *rcut, unsigned char *non0table,
                   int *atm, int natm, int *bas, int nbas, double *env)
{
//        int param[] = {1, 1};
//        PBCeval_sph_drv(GTOshell_eval_grid_cart, GTOcontract_exp0,
//                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
//                        ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
        PBCGTOval_sph_deriv0(ngrids, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                             ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Gradient (ip) of AO values (3 components), Cartesian. */
void PBCGTOval_ip_cart(int ngrids, int *shls_slice, int *ao_loc,
                       double *Ls, int nimgs,
                       double complex *expLk, int nkpts,
                       double complex *ao, double *coord,
                       double *rcut, unsigned char *non0table,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 3};
        PBCeval_cart_drv(GTOshell_eval_grid_ip_cart, GTOcontract_exp1,
                         ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                         ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Gradient (ip) of AO values, spherical. */
void PBCGTOval_ip_sph(int ngrids, int *shls_slice, int *ao_loc,
                      double *Ls, int nimgs,
                      double complex *expLk, int nkpts,
                      double complex *ao, double *coord,
                      double *rcut, unsigned char *non0table,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 3};
        PBCeval_sph_drv(GTOshell_eval_grid_ip_cart, GTOcontract_exp1,
                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                        ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
task4_solution.c
#include <math.h> #include <string.h> #include "timer.h" #define NN 1024 #define NM 1024 float A[NN][NM]; float Anew[NN][NM]; int main(int argc, char** argv) { const int n = NN; const int m = NM; const int iter_max = 1000; const double tol = 1.0e-6; double error = 1.0; memset(A, 0, n * m * sizeof(float)); memset(Anew, 0, n * m * sizeof(float)); for (int j = 0; j < n; j++) { A[j][0] = 1.0; Anew[j][0] = 1.0; } printf("Jacobi relaxation Calculation: %d x %d mesh\n", n, m); StartTimer(); int iter = 0; #pragma acc data copy(A), create(Anew) while ( error > tol && iter < iter_max ) { #pragma acc kernels { error = 0.0; #pragma omp parallel for shared(m, n, Anew, A) for( int j = 1; j < n-1; j++) { #pragma acc loop gang(8) vector(32) for( int i = 1; i < m-1; i++ ) { Anew[j][i] = 0.25 * ( A[j][i+1] + A[j][i-1] + A[j-1][i] + A[j+1][i]); error = fmax( error, fabs(Anew[j][i] - A[j][i])); } } #pragma omp parallel for shared(m, n, Anew, A) for( int j = 1; j < n-1; j++) { #pragma acc loop gang(8) vector(32) for( int i = 1; i < m-1; i++ ) { A[j][i] = Anew[j][i]; } } } if(iter % 100 == 0) printf("%5d, %0.6f\n", iter, error); iter++; } double runtime = GetTimer(); printf(" total: %f s\n", runtime / 1000); }
divsufsort.c
/*
 * divsufsort.c for libdivsufsort
 * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "divsufsort_private.h"
#ifdef _OPENMP
# include <omp.h>
#endif


/*- Private Functions -*/

/* Sorts suffixes of type B*. */
/* T is the input text of length n; SA is the work/output array.
 * bucket_A/bucket_B are the single/double-character bucket count arrays.
 * Returns m, the number of type-B* suffixes found. */
static
saidx_t
sort_typeBstar(const sauchar_t *T, saidx_t *SA,
               saidx_t *bucket_A, saidx_t *bucket_B,
               saidx_t n) {
  saidx_t *PAb, *ISAb, *buf;
#ifdef _OPENMP
  saidx_t *curbuf;
  saidx_t l;
#endif
  saidx_t i, j, k, t, m, bufsize;
  saint_t c0, c1;
#ifdef _OPENMP
  saint_t d0, d1;
  int tmp;
#endif

  /* Initialize bucket arrays. */
  for(i = 0; i < BUCKET_A_SIZE; ++i) { bucket_A[i] = 0; }
  for(i = 0; i < BUCKET_B_SIZE; ++i) { bucket_B[i] = 0; }

  /* Count the number of occurrences of the first one or two characters of each
     type A, B and B* suffix. Moreover, store the beginning position of all
     type B* suffixes into the array SA. */
  for(i = n - 1, m = n, c0 = T[n - 1]; 0 <= i;) {
    /* type A suffix. */
    do { ++BUCKET_A(c1 = c0); } while((0 <= --i) && ((c0 = T[i]) >= c1));
    if(0 <= i) {
      /* type B* suffix. */
      ++BUCKET_BSTAR(c0, c1);
      SA[--m] = i;
      /* type B suffix. */
      for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) {
        ++BUCKET_B(c0, c1);
      }
    }
  }
  m = n - m;
/*
note:
  A type B* suffix is lexicographically smaller than a type B suffix that
  begins with the same first two characters.
*/

  /* Calculate the index of start/end point of each bucket. */
  for(c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) {
    t = i + BUCKET_A(c0);
    BUCKET_A(c0) = i + j; /* start point */
    i = t + BUCKET_B(c0, c0);
    for(c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) {
      j += BUCKET_BSTAR(c0, c1);
      BUCKET_BSTAR(c0, c1) = j; /* end point */
      i += BUCKET_B(c0, c1);
    }
  }

  if(0 < m) {
    /* Sort the type B* suffixes by their first two characters. */
    PAb = SA + n - m; ISAb = SA + m;
    for(i = m - 2; 0 <= i; --i) {
      t = PAb[i], c0 = T[t], c1 = T[t + 1];
      SA[--BUCKET_BSTAR(c0, c1)] = i;
    }
    t = PAb[m - 1], c0 = T[t], c1 = T[t + 1];
    SA[--BUCKET_BSTAR(c0, c1)] = m - 1;

    /* Sort the type B* substrings using sssort. */
#ifdef _OPENMP
    /* Threads pull (c0,c1) buckets off a shared cursor protected by the
     * critical section; each thread sorts with its own slice of buf. */
    tmp = omp_get_max_threads();
    buf = SA + m, bufsize = (n - (2 * m)) / tmp;
    c0 = ALPHABET_SIZE - 2, c1 = ALPHABET_SIZE - 1, j = m;
#pragma omp parallel default(shared) private(curbuf, k, l, d0, d1, tmp)
    {
      tmp = omp_get_thread_num();
      curbuf = buf + tmp * bufsize;
      k = 0;
      for(;;) {
#pragma omp critical(sssort_lock)
        {
          if(0 < (l = j)) {
            d0 = c0, d1 = c1;
            do {
              k = BUCKET_BSTAR(d0, d1);
              if(--d1 <= d0) {
                d1 = ALPHABET_SIZE - 1;
                if(--d0 < 0) { break; }
              }
            } while(((l - k) <= 1) && (0 < (l = k)));
            c0 = d0, c1 = d1, j = k;
          }
        }
        if(l == 0) { break; }
        sssort(T, PAb, SA + k, SA + l,
               curbuf, bufsize, 2, n, *(SA + k) == (m - 1));
      }
    }
#else
    buf = SA + m, bufsize = n - (2 * m);
    for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {
      for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {
        i = BUCKET_BSTAR(c0, c1);
        if(1 < (j - i)) {
          sssort(T, PAb, SA + i, SA + j,
                 buf, bufsize, 2, n, *(SA + i) == (m - 1));
        }
      }
    }
#endif

    /* Compute ranks of type B* substrings. */
    for(i = m - 1; 0 <= i; --i) {
      if(0 <= SA[i]) {
        j = i;
        do { ISAb[SA[i]] = i; } while((0 <= --i) && (0 <= SA[i]));
        SA[i + 1] = i - j;
        if(i <= 0) { break; }
      }
      j = i;
      do { ISAb[SA[i] = ~SA[i]] = j; } while(SA[--i] < 0);
      ISAb[SA[i]] = j;
    }

    /* Construct the inverse suffix array of type B* suffixes using trsort. */
    buf = ISAb + m, bufsize = n - (2 * m);
    trsort(ISAb, SA, m, 1, buf, bufsize);

    /* Set the sorted order of type B* suffixes. */
    for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) {
      for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { }
      if(0 <= i) {
        t = i;
        for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { }
        SA[ISAb[--j]] = ((t == 0) || (1 < (t - i))) ? t : ~t;
      }
    }

    /* Calculate the index of start/end point of each bucket. */
    BUCKET_B(ALPHABET_SIZE - 1, ALPHABET_SIZE - 1) = n; /* end point */
    for(c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) {
      i = BUCKET_A(c0 + 1) - 1;
      for(c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) {
        t = i - BUCKET_B(c0, c1);
        BUCKET_B(c0, c1) = i; /* end point */

        /* Move all type B* suffixes to the correct position. */
        for(i = t, j = BUCKET_BSTAR(c0, c1);
            j <= k;
            --i, --k) { SA[i] = SA[k]; }
      }
      BUCKET_BSTAR(c0, c0 + 1) = i - BUCKET_B(c0, c0) + 1; /* start point */
      BUCKET_B(c0, c0) = i; /* end point */
    }
  }

  return m;
}

/* Constructs the suffix array by using the sorted order of type B*
   suffixes. */
/* NOTE(review): this function continues past the end of this chunk;
   documentation here covers only the visible portion. */
static
void
construct_SA(const sauchar_t *T, saidx_t *SA,
             saidx_t *bucket_A, saidx_t *bucket_B,
             saidx_t n, saidx_t m) {
  saidx_t *i, *j, *k;
  saidx_t s;
  saint_t c0, c1, c2;

  if(0 < m) {
    /* Construct the sorted order of type B suffixes by using
       the sorted order of type B* suffixes. */
    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
      /* Scan the suffix array from right to left. */
      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
          i <= j;
          --j) {
        if(0 < (s = *j)) {
          assert(T[s] == c1);
          assert(((s + 1) < n) && (T[s] <= T[s + 1]));
          assert(T[s - 1] <= T[s]);
          *j = ~s;
          c0 = T[--s];
          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
          if(c0 != c2) {
            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
            k = SA + BUCKET_B(c2 = c0, c1);
          }
          assert(k < j);
          *k-- = s;
        } else {
          assert(((s == 0) && (T[s] == c1)) || (s < 0));
          *j = ~s;
        }
      }
    }
  }

  /* Construct the suffix array by using the sorted order of
     type B suffixes. */
  k = SA + BUCKET_A(c2 = T[n - 1]);
  *k++ = (T[n - 2] < c2) ? ~(n - 1) : (n - 1);
  /* Scan the suffix array from left to right.
*/ for(i = SA, j = SA + n; i < j; ++i) { if(0 < (s = *i)) { assert(T[s - 1] >= T[s]); c0 = T[--s]; if((s == 0) || (T[s - 1] < c0)) { s = ~s; } if(c0 != c2) { BUCKET_A(c2) = k - SA; k = SA + BUCKET_A(c2 = c0); } assert(i < k); *k++ = s; } else { assert(s < 0); *i = ~s; } } } /* Constructs the burrows-wheeler transformed string directly by using the sorted order of type B* suffixes. */ static saidx_t construct_BWT(const sauchar_t *T, saidx_t *SA, saidx_t *bucket_A, saidx_t *bucket_B, saidx_t n, saidx_t m) { saidx_t *i, *j, *k, *orig; saidx_t s; saint_t c0, c1, c2; if(0 < m) { /* Construct the sorted order of type B suffixes by using the sorted order of type B* suffixes. */ for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) { /* Scan the suffix array from right to left. */ for(i = SA + BUCKET_BSTAR(c1, c1 + 1), j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1; i <= j; --j) { if(0 < (s = *j)) { assert(T[s] == c1); assert(((s + 1) < n) && (T[s] <= T[s + 1])); assert(T[s - 1] <= T[s]); c0 = T[--s]; *j = ~((saidx_t)c0); if((0 < s) && (T[s - 1] > c0)) { s = ~s; } if(c0 != c2) { if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; } k = SA + BUCKET_B(c2 = c0, c1); } assert(k < j); *k-- = s; } else if(s != 0) { *j = ~s; #ifndef NDEBUG } else { assert(T[s] == c1); #endif } } } } /* Construct the BWTed string by using the sorted order of type B suffixes. */ k = SA + BUCKET_A(c2 = T[n - 1]); *k++ = (T[n - 2] < c2) ? ~((saidx_t)T[n - 2]) : (n - 1); /* Scan the suffix array from left to right. 
*/ for(i = SA, j = SA + n, orig = SA; i < j; ++i) { if(0 < (s = *i)) { assert(T[s - 1] >= T[s]); c0 = T[--s]; *i = c0; if((0 < s) && (T[s - 1] < c0)) { s = ~((saidx_t)T[s - 1]); } if(c0 != c2) { BUCKET_A(c2) = k - SA; k = SA + BUCKET_A(c2 = c0); } assert(i < k); *k++ = s; } else if(s != 0) { *i = ~s; } else { orig = i; } } return orig - SA; } /*---------------------------------------------------------------------------*/ /*- Function -*/ saint_t divsufsort(const sauchar_t *T, saidx_t *SA, saidx_t n) { saidx_t *bucket_A, *bucket_B; saidx_t m; saint_t err = 0; /* Check arguments. */ if((T == NULL) || (SA == NULL) || (n < 0)) { return -1; } else if(n == 0) { return 0; } else if(n == 1) { SA[0] = 0; return 0; } else if(n == 2) { m = (T[0] < T[1]); SA[m ^ 1] = 0, SA[m] = 1; return 0; } bucket_A = (saidx_t *)malloc(BUCKET_A_SIZE * sizeof(saidx_t)); bucket_B = (saidx_t *)malloc(BUCKET_B_SIZE * sizeof(saidx_t)); /* Suffixsort. */ if((bucket_A != NULL) && (bucket_B != NULL)) { m = sort_typeBstar(T, SA, bucket_A, bucket_B, n); construct_SA(T, SA, bucket_A, bucket_B, n, m); } else { err = -2; } free(bucket_B); free(bucket_A); return err; } saidx_t divbwt(const sauchar_t *T, sauchar_t *U, saidx_t *A, saidx_t n) { saidx_t *B; saidx_t *bucket_A, *bucket_B; saidx_t m, pidx, i; /* Check arguments. */ if((T == NULL) || (U == NULL) || (n < 0)) { return -1; } else if(n <= 1) { if(n == 1) { U[0] = T[0]; } return n; } if((B = A) == NULL) { B = (saidx_t *)malloc((size_t)(n + 1) * sizeof(saidx_t)); } bucket_A = (saidx_t *)malloc(BUCKET_A_SIZE * sizeof(saidx_t)); bucket_B = (saidx_t *)malloc(BUCKET_B_SIZE * sizeof(saidx_t)); /* Burrows-Wheeler Transform. */ if((B != NULL) && (bucket_A != NULL) && (bucket_B != NULL)) { m = sort_typeBstar(T, B, bucket_A, bucket_B, n); pidx = construct_BWT(T, B, bucket_A, bucket_B, n, m); /* Copy to output string. 
*/ U[0] = T[n - 1]; for(i = 0; i < pidx; ++i) { U[i + 1] = (sauchar_t)B[i]; } for(i += 1; i < n; ++i) { U[i] = (sauchar_t)B[i]; } pidx += 1; } else { pidx = -2; } free(bucket_B); free(bucket_A); if(A == NULL) { free(B); } return pidx; } const char * divsufsort_version(void) { return PROJECT_VERSION_FULL; }
GB_binop__gt_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__gt_uint8) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__gt_uint8) // A.*B function (eWiseMult): GB (_AemultB_03__gt_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_uint8) // A*D function (colscale): GB (_AxD__gt_uint8) // D*A function (rowscale): GB (_DxB__gt_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__gt_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__gt_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_uint8) // C=scalar+B GB (_bind1st__gt_uint8) // C=scalar+B' GB (_bind1st_tran__gt_uint8) // C=A+scalar GB (_bind2nd__gt_uint8) // C=A'+scalar GB (_bind2nd_tran__gt_uint8) // C type: bool // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define 
GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x > y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GT || GxB_NO_UINT8 || GxB_NO_GT_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__gt_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__gt_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__gt_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__gt_uint8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, 
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__gt_uint8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__gt_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__gt_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const 
int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__gt_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__gt_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__gt_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__gt_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 
; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = Bx [p] ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__gt_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = Ax [p] ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB (_bind1st_tran__gt_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB (_bind2nd_tran__gt_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
game_utils.c
#include <stdio.h> #include <stdlib.h> #include <mpi.h> #include <omp.h> #include "../headers/game_utils.h" #include "../../common/headers/io_utils.h" int exec_game_rules(char** prev_gen, char** next_gen, int i, int j, int alive_neighbours) { int changed = 0; // rules regarding an alive cell if (prev_gen[i][j] == '1') { if (alive_neighbours != 2 && alive_neighbours != 3) { next_gen[i][j] = '0'; changed = 1; } else { next_gen[i][j] = '1'; } } // rules regarding dead cells else { // exactly 3 neighbours -> a new cell is born if (alive_neighbours == 3) { next_gen[i][j] = '1'; changed = 1; } else { next_gen[i][j] = '0'; } } return changed; } int calculate_inner_gen(char** prev_gen, char** next_gen, subgrid_info* subgrid) { int changed = 0; #pragma omp parallel for collapse(2) schedule(static) for (int i = 2; i <= subgrid->rows-1; i++) { for (int j = 2; j <= subgrid->cols-1; j++) { // compute how many organizations exist near the cell we are about to evolve int alive_neighbours = (prev_gen[i-1][j-1] == '1') + (prev_gen[i-1][j] == '1') + (prev_gen[i-1][j+1] == '1') + (prev_gen[i][j-1] == '1') + (prev_gen[i][j+1] == '1') + (prev_gen[i+1][j-1] == '1') + (prev_gen[i+1][j] == '1') + (prev_gen[i+1][j+1] == '1'); // printf("(%d, %d) - N: %d - RANK: %d\n",i,j,alive_neighbours,rank); if (exec_game_rules(prev_gen, next_gen, i, j, alive_neighbours) == 1) { changed = 1; } } } return changed; } int calculate_outter_gen(char** prev_gen, char** next_gen, subgrid_info* subgrid) { int alive_neighbours, changed = 0; // First row #pragma omp parallel for schedule(static) for (int j = 1; j <= subgrid->cols; ++j) { int i = 1; int alive_neighbours = (prev_gen[i-1][j-1] == '1') + (prev_gen[i-1][j] == '1') + (prev_gen[i-1][j+1] == '1') + (prev_gen[i][j-1] == '1') + (prev_gen[i][j+1] == '1') + (prev_gen[i+1][j-1] == '1') + (prev_gen[i+1][j] == '1') + (prev_gen[i+1][j+1] == '1'); if (exec_game_rules(prev_gen, next_gen, i, j, alive_neighbours) == 1) { changed = 1; } } // Last row #pragma omp 
parallel for schedule(static) for (int j = 1; j <= subgrid->cols; ++j) { int i = subgrid->rows; int alive_neighbours = (prev_gen[i-1][j-1] == '1') + (prev_gen[i-1][j] == '1') + (prev_gen[i-1][j+1] == '1') + (prev_gen[i][j-1] == '1') + (prev_gen[i][j+1] == '1') + (prev_gen[i+1][j-1] == '1') + (prev_gen[i+1][j] == '1') + (prev_gen[i+1][j+1] == '1'); if (exec_game_rules(prev_gen, next_gen, i, j, alive_neighbours) == 1) { changed = 1; } } // First column #pragma omp parallel for schedule(static) for (int i = 1; i <= subgrid->rows; ++i) { int j = 1; int alive_neighbours = (prev_gen[i-1][j-1] == '1') + (prev_gen[i-1][j] == '1') + (prev_gen[i-1][j+1] == '1') + (prev_gen[i][j-1] == '1') + (prev_gen[i][j+1] == '1') + (prev_gen[i+1][j-1] == '1') + (prev_gen[i+1][j] == '1') + (prev_gen[i+1][j+1] == '1'); if (exec_game_rules(prev_gen, next_gen, i, j, alive_neighbours) == 1) { changed = 1; } } // Last column #pragma omp parallel for schedule(static) for (int i = 1; i <= subgrid->rows; ++i) { int j = subgrid->cols; int alive_neighbours = (prev_gen[i-1][j-1] == '1') + (prev_gen[i-1][j] == '1') + (prev_gen[i-1][j+1] == '1') + (prev_gen[i][j-1] == '1') + (prev_gen[i][j+1] == '1') + (prev_gen[i+1][j-1] == '1') + (prev_gen[i+1][j] == '1') + (prev_gen[i+1][j+1] == '1'); if (exec_game_rules(prev_gen, next_gen, i, j, alive_neighbours) == 1) { changed = 1; } } return changed; }
fitsmooth.c
#include <stdio.h>   /* gets */
#include <stdlib.h>  /* atoi, malloc */
#include <string.h>  /* strcpy */
#include "math.h"

#include "sparse_dok.h" /* in utilities */
#include "quad_tree.h" /* in utilities */

#if defined(__APPLE__)
// clang doesn't have openmp
#else
#include "omp.h"
#endif

// Errors defined for netcdf reading
// NOTE(review): ERR references nc_strerror, which is declared in netcdf.h —
// presumably included via one of the project headers above; confirm.
#define ERRCODE 2
#define ERR(e) {printf("Error: %s\n", nc_strerror(e)); exit(ERRCODE);}

//-------------------------- QUANTITY FITTING ------------------------------

// Builds the matrix D used to smooth the interpolation of a variable from
// scattered data points to a mesh. See fit.py for more details.
//
// For each of the n triangles, the constant gradients (a_i, b_i) of the three
// linear (P1) vertex basis functions are computed, and the per-triangle
// stiffness contributions (grad_i . grad_j) * area are accumulated into the
// sparse dictionary-of-keys matrix `smoothing_mat` via add_dok_entry.
//
// Inputs:
//   n                  - number of triangles
//   triangles          - 3 global vertex indices per triangle (3n longs)
//   areas              - triangle areas (n doubles)
//   vertex_coordinates - x,y of the 3 vertices per triangle (6n doubles)
//   strides            - unused in this function (kept for interface compat)
//   smoothing_mat      - output sparse DOK matrix (accumulated into)
// Returns 0 (err is never set to a non-zero value here).
int _build_smoothing_matrix(int n,
                            long* triangles,
                            double* areas,
                            double* vertex_coordinates,
                            int* strides,
                            sparse_dok * smoothing_mat)
{
    int k;
    int k3,k6;
    int err = 0;
    edge_key_t key;

    double det,area,x0,x1,x2,y0,y1,y2;
    double a0,b0,a1,b1,a2,b2,e01,e12,e20;
    int v0,v1,v2;
    double smoothing_val;

    for(k=0; k<n; k++) {
        // k3 indexes the triangle vertex-index array (3 entries per triangle);
        // k6 indexes the vertex coordinate array (6 doubles per triangle)
        k3=k*3;
        k6=k*6;
        // store the area for the current triangle
        area = areas[k];
        // store current triangle's global vertex indices
        v0 = triangles[k3];
        v1 = triangles[k3+1];
        v2 = triangles[k3+2];
        // store the locations of the three vertices
        x0 = vertex_coordinates[k6];
        y0 = vertex_coordinates[k6+1];
        x1 = vertex_coordinates[k6+2];
        y1 = vertex_coordinates[k6+3];
        x2 = vertex_coordinates[k6+4];
        y2 = vertex_coordinates[k6+5];

        // calculate gradients (move to external function?)
        // det is twice the signed triangle area; (a_i, b_i) is the constant
        // gradient of the basis function that is 1 at vertex i, 0 at the others.
        det = (y2-y0)*(x1-x0) - (y1-y0)*(x2-x0);

        a0 = (y2-y0)*(0-1) - (y1-y0)*(0-1);
        a0 /= det;

        b0 = (x1-x0)*(0-1) - (x2-x0)*(0-1);
        b0 /= det;

        a1 = (y2-y0)*(1-0) - (y1-y0)*(0-0);
        a1 /= det;

        b1 = (x1-x0)*(0-0) - (x2-x0)*(1-0);
        b1 /= det;

        a2 = (y2-y0)*(0-0) - (y1-y0)*(1-0);
        a2 /= det;

        b2 = (x1-x0)*(1-0) - (x2-x0)*(0-0);
        b2 /= det;

        // insert diagonal contributions: (grad_i . grad_i) * area

        // v0,v0
        key.i = v0;
        key.j = v0;
        smoothing_val = (a0*a0 + b0*b0)*area;
        add_dok_entry(smoothing_mat,key,smoothing_val);

        // v1,v1
        key.i = v1;
        key.j = v1;
        smoothing_val = (a1*a1 + b1*b1)*area;
        add_dok_entry(smoothing_mat,key,smoothing_val);

        // v2,v2
        key.i = v2;
        key.j = v2;
        smoothing_val = (a2*a2 + b2*b2)*area;
        add_dok_entry(smoothing_mat,key,smoothing_val);

        // insert off-diagonal contributions symmetrically: (grad_i . grad_j) * area

        e01 = (a0*a1 + b0*b1)*area;
        // v0,v1 (v1,v0)
        key.i = v0;
        key.j = v1;
        add_dok_entry(smoothing_mat,key,e01);
        key.i = v1;
        key.j = v0;
        add_dok_entry(smoothing_mat,key,e01);

        e12 = (a1*a2 + b1*b2)*area;
        // v1,v2 (v2,v1)
        key.i = v1;
        key.j = v2;
        add_dok_entry(smoothing_mat,key,e12);
        key.i = v2;
        key.j = v1;
        add_dok_entry(smoothing_mat,key,e12);

        e20 = (a2*a0 + b2*b0)*area;
        // v2,v0 (v0,v2)
        key.i = v2;
        key.j = v0;
        add_dok_entry(smoothing_mat,key,e20);
        key.i = v0;
        key.j = v2;
        add_dok_entry(smoothing_mat,key,e20);
    }

    return err;
}

// Builds a quad tree out of a list of triangles for quick
// searching.
// Build a quad tree over the n mesh triangles for fast point-in-triangle
// queries. `extents` gives the bounding box {xmin, xmax, ymin/xlo ordering as
// expected by new_quad_tree — see quad_tree.h}. The `triangles` parameter is
// unused here (vertex coordinates are already stored per-triangle in
// 6-double blocks); it is kept for interface consistency.
// Returns a pointer to the newly allocated tree (caller owns it).
quad_tree * _build_quad_tree(int n,
                             long* triangles,
                             double* vertex_coordinates,
                             double* extents)
{
    int k,k6;
    double x0,y0,x1,y1,x2,y2;

    // set up quad tree and allocate memory
    quad_tree * tree = new_quad_tree(extents[0],extents[1],extents[2],extents[3]);

    // iterate through triangles
    for(k=0; k<n; k++) {
        // multiply k by 6 to index the coordinate array, which stores
        // 6 doubles (3 x,y pairs) per triangle
        k6=k*6;
        // store the locations of the three vertices
        x0 = vertex_coordinates[k6];
        y0 = vertex_coordinates[k6 + 1];
        x1 = vertex_coordinates[k6 + 2];
        y1 = vertex_coordinates[k6 + 3];
        x2 = vertex_coordinates[k6 + 4];
        y2 = vertex_coordinates[k6 + 5];
        triangle * T = new_triangle(k,x0,y0,x1,y1,x2,y2);
        quad_tree_insert_triangle(tree,T);
    }

    // return pointer to new tree struct
    return tree;
}

// Builds the AtA and Atz interpolation matrix
// and residual. Uses a quad_tree for fast access to the triangles of the mesh.
// This function takes a list of point coordinates, and associated point values
// (for any number of attributes).
//
// N        - number of mesh vertices (length of each Atz[w] row)
// zdims    - number of attributes per data point
// npts     - number of data points; point_coordinates holds 2*npts doubles,
//            point_values holds zdims*npts doubles
// AtA      - output sparse normal matrix (accumulated via add_dok_entry)
// Atz      - output right-hand sides, zeroed here before accumulation
//
// Points are located with search(); points falling outside the mesh
// (search returns NULL) are silently skipped. The loop is parallelized over
// points; all shared updates to AtA/Atz happen inside one omp critical
// section, so the accumulation is race-free (though serialized).
// Always returns 0.
int _build_matrix_AtA_Atz_points(int N,
                                 long * triangles,
                                 double * point_coordinates,
                                 double * point_values,
                                 int zdims,
                                 int npts,
                                 sparse_dok * AtA,
                                 double ** Atz,
                                 quad_tree * quadtree)
{
    int k;
    int i,w;

    // zero the right-hand-side accumulators before adding contributions
    for(w=0;w<zdims;w++){
        for(i=0;i<N;i++){
            Atz[w][i]=0;
        }
    }

    edge_key_t key;

    #pragma omp parallel for private(k,i,key,w)
    for(k=0;k<npts;k++){
        double x = point_coordinates[2*k];
        double y = point_coordinates[2*k+1];
        // find the triangle containing this data point (NULL if outside mesh)
        triangle * T = search(quadtree,x,y);

        if(T!=NULL){
            // barycentric weights of (x, y) w.r.t. T's three vertices;
            // calculate_sigma allocates — freed below
            double * sigma = calculate_sigma(T,x,y);
            int js[3];
            for(i=0;i<3;i++){
                js[i]=triangles[3*(T->index)+i];
            }
            // serialize the shared AtA/Atz updates across threads
            #pragma omp critical
            {
                for(i=0;i<3;i++){
                    for(w=0;w<zdims;w++){
                        Atz[w][js[i]] += sigma[i]*point_values[zdims*k+w];
                    }
                    for(w=0;w<3;w++){
                        key.i=js[i];
                        key.j=js[w];
                        add_dok_entry(AtA,key,sigma[i]*sigma[w]);
                    }
                }
            }
            free(sigma);
            sigma=NULL;
        }
    }

    return 0;
}

// Combines two sparse_dok matricies and two vectors of doubles.
// Accumulate a second partial normal-equation system into the first:
// dok_AtA1 += dok_AtA2 (sparse DOK matrices, unit weight on both operands)
// and Atz1 += Atz2 element-wise over the n*zdim packed right-hand-side values.
void _combine_partial_AtA_Atz(sparse_dok * dok_AtA1,sparse_dok * dok_AtA2,
                              double* Atz1,
                              double* Atz2,
                              int n, int zdim){

    // merge the sparse AtA contributions with weight 1 on each side
    add_sparse_dok(dok_AtA1, 1, dok_AtA2, 1);

    // fold the dense Atz contributions together
    const int total = n * zdim;
    for (int idx = 0; idx < total; idx++) {
        Atz1[idx] += Atz2[idx];
    }
}
GB_binop__iseq_int16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): any change wanted here should be made in the generator
// template instead; comments added to this file will be lost on regeneration.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__iseq_int16)
// A.*B function (eWiseMult):       GB (_AemultB_01__iseq_int16)
// A.*B function (eWiseMult):       GB (_AemultB_02__iseq_int16)
// A.*B function (eWiseMult):       GB (_AemultB_03__iseq_int16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__iseq_int16)
// A*D function (colscale):         GB (_AxD__iseq_int16)
// D*A function (rowscale):         GB (_DxB__iseq_int16)
// C+=B function (dense accum):     GB (_Cdense_accumB__iseq_int16)
// C+=b function (dense accum):     GB (_Cdense_accumb__iseq_int16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__iseq_int16)
// C=scalar+B                       GB (_bind1st__iseq_int16)
// C=scalar+B'                      GB (_bind1st_tran__iseq_int16)
// C=A+scalar                       GB (_bind2nd__iseq_int16)
// C=A'+scalar                      GB (_bind2nd_tran__iseq_int16)

// C type:   int16_t
// A type:   int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij == bij)

// The GB_* macros below configure the generic algorithm templates that are
// textually #include'd into each function body further down.

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int16_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: the ISEQ operator returns 1 if aij == bij, else 0,
// stored as an int16_t (not a bool)
#define GB_BINOP(z, x, y, i, j) \
    z = (x == y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (each function then compiles to "return (GrB_NO_VALUE)")
#define GB_DISABLE \
    (GxB_NO_ISEQ || GxB_NO_INT16 || GxB_NO_ISEQ_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISEQ is none of those, so this variant is compiled out for this operator.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // loop body supplied by the template, specialized via the macros above
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__iseq_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the inner block above always returns;
    // harmless generator artifact
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__iseq_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix slicing workspace, declared here and freed by GB_FREE_WORK
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__iseq_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (GB_BINOP_FLIP is 0 for ISEQ, so only this branch is compiled.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__iseq_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__iseq_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // GBB tests the bitmap: skip entries not present in B
        if (!GBB (Bb, p)) continue ;
        int16_t bij = Bx [p] ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__iseq_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // GBB tests the bitmap: skip entries not present in A
        if (!GBB (Ab, p)) continue ;
        int16_t aij = Ax [p] ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int16_t aij = Ax [pA] ;         \
    Cx [pC] = (x == aij) ;          \
}

GrB_Info GB (_bind1st_tran__iseq_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int16_t aij = Ax [pA] ;         \
    Cx [pC] = (aij == y) ;          \
}

GrB_Info GB (_bind2nd_tran__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
DRB098-simd2-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#include <stdio.h>
/* Two-dimension array computation with a vectorization directive.
   collapse(2) makes simd associate with 2 loops. Loop iteration
   variables should be predetermined as lastprivate. */
#include <omp.h>

/* DataRaceBench kernel (race-free variant): fills a, b, c from the row
   index i, forms c = a .* b elementwise, then prints the sample element
   c[50][50].
   NOTE(review): the description above mentions a simd collapse(2)
   directive, but the code below uses nested '#pragma omp parallel for'
   instead -- presumably this is a source-to-source transformed variant
   of the original benchmark; confirm against DRB098-simd2-orig-no. */
int main()
{
  int len = 100;
  /* len x len variable-length arrays on the stack
     (3 * 100 * 100 doubles, ~240 KB total). */
  double a[len][len];
  double b[len][len];
  double c[len][len];
  int i;
  int j;
  /* Initialization: every element written at (i,j) depends only on the
     loop indices, so the nested parallel loops do not race. */
#pragma omp parallel for private (i,j)
  for (i = 0; i <= len - 1; i += 1) {
#pragma omp parallel for private (j)
    for (j = 0; j <= len - 1; j += 1) {
      a[i][j] = ((double )i) / 2.0;
      b[i][j] = ((double )i) / 3.0;
      c[i][j] = ((double )i) / 7.0;
    }
  }
  /* Elementwise product: each iteration writes a distinct c[i][j]. */
#pragma omp parallel for private (i,j) firstprivate (len)
  for (i = 0; i <= len - 1; i += 1) {
#pragma omp parallel for private (j)
    for (j = 0; j <= len - 1; j += 1) {
      c[i][j] = a[i][j] * b[i][j];
    }
  }
  printf("c[50][50]=%f\n",c[50][50]);
  return 0;
}
convolutiondepthwise_3x3_int8.h
// BUG1989 is pleased to support the open source community by supporting ncnn available. // // Copyright (C) 2019 BUG1989. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convdw3x3s1_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); const signed char* kernel = (const signed char*)_kernel + p * 9; int* outptr0 = out; int* outptr0n = outptr0 + outw; const signed char* img0 = bottom_blob.channel(p); const signed char* r0 = img0; const signed char* r1 = img0 + w; const signed char* r2 = img0 + w * 2; const signed char* r3 = img0 + w * 3; int i = 0; #if __ARM_NEON int8x16_t _k0123456789x = vld1q_s8(kernel); int16x8_t _k_s16 = vmovl_s8(vget_low_s8(_k0123456789x)); int16x8_t _kn_s16 = vmovl_s8(vget_high_s8(_k0123456789x)); int16x4_t _k0123 = vget_low_s16(_k_s16); int16x4_t _k4567 = vget_high_s16(_k_s16); int16x4_t _k8xxx = vget_low_s16(_kn_s16); #endif // __ARM_NEON for (; i + 1 < outh; i += 2) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "ld1 {v4.8b, v5.8b}, [%3] \n" "ld1 {v6.8b, v7.8b}, [%4] \n" "ld1 {v8.8b, v9.8b}, [%5] \n" "ld1 {v10.8b, v11.8b}, [%6] \n" "add %3, %3, #8 \n" 
"add %4, %4, #8 \n" "add %5, %5, #8 \n" "add %6, %6, #8 \n" "ext v12.8b, v4.8b, v5.8b, #1 \n" "ext v13.8b, v4.8b, v5.8b, #2 \n" "ext v14.8b, v6.8b, v7.8b, #1 \n" "ext v15.8b, v6.8b, v7.8b, #2 \n" "ext v16.8b, v8.8b, v9.8b, #1 \n" "ext v17.8b, v8.8b, v9.8b, #2 \n" "ext v18.8b, v10.8b, v11.8b, #1 \n" "ext v19.8b, v10.8b, v11.8b, #2 \n" "sshll v4.8h, v4.8b, #0 \n" // r00 "sshll v12.8h, v12.8b, #0 \n" // r01 "sshll v13.8h, v13.8b, #0 \n" // r02 "sshll v6.8h, v6.8b, #0 \n" // r10 "sshll v14.8h, v14.8b, #0 \n" // r11 "sshll v15.8h, v15.8b, #0 \n" // r12 "sshll v8.8h, v8.8b, #0 \n" // r20 "sshll v16.8h, v16.8b, #0 \n" // r21 "sshll v17.8h, v17.8b, #0 \n" // r22 "sshll v10.8h, v10.8b, #0 \n" // r30 "sshll v18.8h, v18.8b, #0 \n" // r31 "sshll v19.8h, v19.8b, #0 \n" // r32 // r0 "smull v20.4s, v4.4h, %14.h[0] \n" // (r00 - r07) * k00 "smull2 v21.4s, v4.8h, %14.h[0] \n" "smull v22.4s, v12.4h, %14.h[1] \n" // (r01 - r08) * k01 "smull2 v23.4s, v12.8h, %14.h[1] \n" "smull v24.4s, v13.4h, %14.h[2] \n" // (r02 - r09) * k02 "smull2 v25.4s, v13.8h, %14.h[2] \n" // r1 "smull v26.4s, v6.4h, %14.h[0] \n" // (r10 - r17) * k00 "smull2 v27.4s, v6.8h, %14.h[0] \n" "smull v28.4s, v14.4h, %14.h[1] \n" // (r11 - r18) * k01 "smull2 v29.4s, v14.8h, %14.h[1] \n" "smull v30.4s, v15.4h, %14.h[2] \n" // (r12 - r19) * k02 "smull2 v31.4s, v15.8h, %14.h[2] \n" "smlal v20.4s, v6.4h, %14.h[3] \n" // (r10 - r17) * k03 "smlal2 v21.4s, v6.8h, %14.h[3] \n" "smlal v22.4s, v14.4h, %15.h[0] \n" // (r11 - r18) * k04 "smlal2 v23.4s, v14.8h, %15.h[0] \n" "smlal v24.4s, v15.4h, %15.h[1] \n" // (r12 - r19) * k05 "smlal2 v25.4s, v15.8h, %15.h[1] \n" // r2 "smlal v26.4s, v8.4h, %14.h[3] \n" // (r20 - r27) * k03 "smlal2 v27.4s, v8.8h, %14.h[3] \n" "smlal v28.4s, v16.4h, %15.h[0] \n" // (r21 - r28) * k04 "smlal2 v29.4s, v16.8h, %15.h[0] \n" "smlal v30.4s, v17.4h, %15.h[1] \n" // (r22 - r29) * k05 "smlal2 v31.4s, v17.8h, %15.h[1] \n" "smlal v20.4s, v8.4h, %15.h[2] \n" // (r20 - r27) * k06 "smlal2 v21.4s, v8.8h, %15.h[2] 
\n" "smlal v22.4s, v16.4h, %15.h[3] \n" // (r21 - r28) * k07 "smlal2 v23.4s, v16.8h, %15.h[3] \n" "smlal v24.4s, v17.4h, %16.h[0] \n" // (r22 - r29) * k08 "smlal2 v25.4s, v17.8h, %16.h[0] \n" // r3 "smlal v26.4s, v10.4h, %15.h[2] \n" // (r30 - r37) * k06 "smlal2 v27.4s, v10.8h, %15.h[2] \n" "smlal v28.4s, v18.4h, %15.h[3] \n" // (r31 - r38) * k07 "smlal2 v29.4s, v18.8h, %15.h[3] \n" "smlal v30.4s, v19.4h, %16.h[0] \n" // (r32 - r39) * k08 "smlal2 v31.4s, v19.8h, %16.h[0] \n" // add and save "add v20.4s, v20.4s, v22.4s \n" "add v21.4s, v21.4s, v23.4s \n" "add v26.4s, v26.4s, v28.4s \n" "add v27.4s, v27.4s, v29.4s \n" "add v20.4s, v20.4s, v24.4s \n" "add v21.4s, v21.4s, v25.4s \n" "add v26.4s, v26.4s, v30.4s \n" "add v27.4s, v27.4s, v31.4s \n" "st1 {v20.4s, v21.4s}, [%1], #32 \n" "st1 {v26.4s, v27.4s}, [%2], #32 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr0n), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2), // %5 "=r"(r3) // %6 : "0"(nn), "1"(outptr0), "2"(outptr0n), "3"(r0), "4"(r1), "5"(r2), "6"(r3), "w"(_k0123), // %14 "w"(_k4567), // %15 "w"(_k8xxx) // %16 : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } #else if (nn > 0) { asm volatile( "0: \n" // r0 "vld1.s8 {d30-d31}, [%3] \n" // r0 "add %3, %3, #8 \n" "vext.s8 d10, d30, d31, #1 \n" "vext.s8 d12, d30, d31, #2 \n" "vmovl.s8 q15, d30 \n" // r00 "vmovl.s8 q5, d10 \n" // r01 "vmovl.s8 q6, d12 \n" // r02 // sum0 "vmull.s16 q7, d30, %P14[0] \n" // (r00 - r07) * k00 "vmull.s16 q8, d31, %P14[0] \n" "vmull.s16 q9, d10, %P14[1] \n" // (r01 - r08) * k01 "vmull.s16 q10, d11, %P14[1] \n" "vmlal.s16 q7, d12, %P14[2] \n" // (r02 - r09) * k02 "vmlal.s16 q8, d13, %P14[2] \n" // r1 "vld1.s8 {d30-d31}, [%4] \n" // r1 "add %4, %4, #8 \n" "vext.s8 d10, d30, d31, #1 \n" "vext.s8 d12, d30, d31, #2 \n" "vmovl.s8 q15, d30 \n" // 
r10 "vmovl.s8 q5, d10 \n" // r11 "vmovl.s8 q6, d12 \n" // r12 // sum0 "vmlal.s16 q7, d30, %P14[3] \n" // (r10 - r17) * k03 "vmlal.s16 q8, d31, %P14[3] \n" "vmlal.s16 q9, d10, %P15[0] \n" // (r11 - r18) * k04 "vmlal.s16 q10, d11, %P15[0] \n" "vmlal.s16 q7, d12, %P15[1] \n" // (r12 - r19) * k05 "vmlal.s16 q8, d13, %P15[1] \n" // sum1 "vmull.s16 q11, d30, %P14[0] \n" // (r10 - r17) * k00 "vmull.s16 q12, d31, %P14[0] \n" "vmull.s16 q13, d10, %P14[1] \n" // (r11 - r18) * k01 "vmull.s16 q14, d11, %P14[1] \n" "vmlal.s16 q11, d12, %P14[2] \n" // (r12 - r19) * k02 "vmlal.s16 q12, d13, %P14[2] \n" // r2 "vld1.s8 {d30-d31}, [%5] \n" // r2 "add %5, %5, #8 \n" "vext.s8 d10, d30, d31, #1 \n" "vext.s8 d12, d30, d31, #2 \n" "vmovl.s8 q15, d30 \n" // r20 "vmovl.s8 q5, d10 \n" // r21 "vmovl.s8 q6, d12 \n" // r22 // sum0 "vmlal.s16 q7, d30, %P15[2] \n" // (r20 - r27) * k06 "vmlal.s16 q8, d31, %P15[2] \n" "vmlal.s16 q9, d10, %P15[3] \n" // (r21 - r28) * k07 "vmlal.s16 q10, d11, %P15[3] \n" "vmlal.s16 q7, d12, %P16[0] \n" // (r22 - r29) * k08 "vmlal.s16 q8, d13, %P16[0] \n" // sum1 "vmlal.s16 q11, d30, %P14[3] \n" // (r20 - r27) * k03 "vmlal.s16 q12, d31, %P14[3] \n" "vmlal.s16 q13, d10, %P15[0] \n" // (r21 - r28) * k04 "vmlal.s16 q14, d11, %P15[0] \n" "vmlal.s16 q11, d12, %P15[1] \n" // (r22 - r29) * k05 "vmlal.s16 q12, d13, %P15[1] \n" // r3 "vld1.s8 {d30-d31}, [%6] \n" // r3 "add %6, %6, #8 \n" "vext.s8 d10, d30, d31, #1 \n" "vext.s8 d12, d30, d31, #2 \n" "vmovl.s8 q15, d30 \n" // r30 "vmovl.s8 q5, d10 \n" // r31 "vmovl.s8 q6, d12 \n" // r32 // sum1 "vmlal.s16 q11, d30, %P15[2] \n" // (r30 - r37) * k06 "vmlal.s16 q12, d31, %P15[2] \n" "vmlal.s16 q13, d10, %P15[3] \n" // (r31 - r38) * k07 "vmlal.s16 q14, d11, %P15[3] \n" "vmlal.s16 q11, d12, %P16[0] \n" // (r32 - r39) * k08 "vmlal.s16 q12, d13, %P16[0] \n" "subs %0, %0, #1 \n" // add and save "vadd.s32 q7, q7, q9 \n" "vadd.s32 q8, q8, q10 \n" "vadd.s32 q11, q11, q13 \n" "vadd.s32 q12, q12, q14 \n" "vst1.s32 {d14-d17}, [%1]! 
\n" "vst1.s32 {d22-d25}, [%2]! \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr0n), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2), // %5 "=r"(r3) // %6 : "0"(nn), "1"(outptr0), "2"(outptr0n), "3"(r0), "4"(r1), "5"(r2), "6"(r3), "w"(_k0123), // %14 "w"(_k4567), // %15 "w"(_k8xxx) // %16 : "cc", "memory", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { // TODO NEON int sum0 = 0; int sum0n = 0; sum0 += (int)r0[0] * kernel[0]; sum0 += (int)r0[1] * kernel[1]; sum0 += (int)r0[2] * kernel[2]; sum0 += (int)r1[0] * kernel[3]; sum0 += (int)r1[1] * kernel[4]; sum0 += (int)r1[2] * kernel[5]; sum0 += (int)r2[0] * kernel[6]; sum0 += (int)r2[1] * kernel[7]; sum0 += (int)r2[2] * kernel[8]; sum0n += (int)r1[0] * kernel[0]; sum0n += (int)r1[1] * kernel[1]; sum0n += (int)r1[2] * kernel[2]; sum0n += (int)r2[0] * kernel[3]; sum0n += (int)r2[1] * kernel[4]; sum0n += (int)r2[2] * kernel[5]; sum0n += (int)r3[0] * kernel[6]; sum0n += (int)r3[1] * kernel[7]; sum0n += (int)r3[2] * kernel[8]; *outptr0 = sum0; *outptr0n = sum0n; r0++; r1++; r2++; r3++; outptr0++; outptr0n++; } r0 += 2 + w; r1 += 2 + w; r2 += 2 + w; r3 += 2 + w; outptr0 += outw; outptr0n += outw; } for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "ld1 {v4.8b, v5.8b}, [%2] \n" "ld1 {v6.8b, v7.8b}, [%3] \n" "ld1 {v8.8b, v9.8b}, [%4] \n" "add %2, %2, #8 \n" "add %3, %3, #8 \n" "add %4, %4, #8 \n" "ext v12.8b, v4.8b, v5.8b, #1 \n" "ext v13.8b, v4.8b, v5.8b, #2 \n" "ext v14.8b, v6.8b, v7.8b, #1 \n" "ext v15.8b, v6.8b, v7.8b, #2 \n" "ext v16.8b, v8.8b, v9.8b, #1 \n" "ext v17.8b, v8.8b, v9.8b, #2 \n" "sshll v4.8h, v4.8b, #0 \n" // r00 "sshll v12.8h, v12.8b, #0 \n" // r01 "sshll v13.8h, v13.8b, #0 \n" // r02 "sshll v6.8h, v6.8b, #0 \n" // r10 "sshll v14.8h, v14.8b, #0 \n" // 
r11 "sshll v15.8h, v15.8b, #0 \n" // r12 "sshll v8.8h, v8.8b, #0 \n" // r20 "sshll v16.8h, v16.8b, #0 \n" // r21 "sshll v17.8h, v17.8b, #0 \n" // r22 // r0 "smull v20.4s, v4.4h, %10.h[0] \n" // (r00 - r07) * k00 "smull2 v21.4s, v4.8h, %10.h[0] \n" "smull v22.4s, v12.4h, %10.h[1] \n" // (r01 - r08) * k01 "smull2 v23.4s, v12.8h, %10.h[1] \n" "smull v24.4s, v13.4h, %10.h[2] \n" // (r02 - r09) * k02 "smull2 v25.4s, v13.8h, %10.h[2] \n" // r1 "smlal v20.4s, v6.4h, %10.h[3] \n" // (r10 - r17) * k03 "smlal2 v21.4s, v6.8h, %10.h[3] \n" "smlal v22.4s, v14.4h, %11.h[0] \n" // (r11 - r18) * k04 "smlal2 v23.4s, v14.8h, %11.h[0] \n" "smlal v24.4s, v15.4h, %11.h[1] \n" // (r12 - r19) * k05 "smlal2 v25.4s, v15.8h, %11.h[1] \n" // r2 "smlal v20.4s, v8.4h, %11.h[2] \n" // (r20 - r27) * k06 "smlal2 v21.4s, v8.8h, %11.h[2] \n" "smlal v22.4s, v16.4h, %11.h[3] \n" // (r21 - r28) * k07 "smlal2 v23.4s, v16.8h, %11.h[3] \n" "smlal v24.4s, v17.4h, %12.h[0] \n" // (r22 - r29) * k08 "smlal2 v25.4s, v17.8h, %12.h[0] \n" // add and save "add v20.4s, v20.4s, v22.4s \n" "add v21.4s, v21.4s, v23.4s \n" "add v20.4s, v20.4s, v24.4s \n" "add v21.4s, v21.4s, v25.4s \n" "st1 {v20.4s, v21.4s}, [%1], #32 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k4567), // %11 "w"(_k8xxx) // %12 : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25"); } #else if (nn > 0) { asm volatile( "0: \n" // r0 "vld1.s8 {d30-d31}, [%2] \n" // r0 "add %2, %2, #8 \n" "vext.s8 d10, d30, d31, #1 \n" "vext.s8 d12, d30, d31, #2 \n" "vmovl.s8 q15, d30 \n" // r00 "vmovl.s8 q5, d10 \n" // r01 "vmovl.s8 q6, d12 \n" // r02 // sum0 "vmull.s16 q7, d30, %P10[0] \n" // (r00 - r07) * k00 "vmull.s16 q8, d31, %P10[0] \n" "vmull.s16 q9, d10, %P10[1] \n" // (r01 - r08) * k01 "vmull.s16 
q10, d11, %P10[1] \n" "vmlal.s16 q7, d12, %P10[2] \n" // (r02 - r09) * k02 "vmlal.s16 q8, d13, %P10[2] \n" // r1 "vld1.s8 {d30-d31}, [%3] \n" // r1 "add %3, %3, #8 \n" "vext.s8 d10, d30, d31, #1 \n" "vext.s8 d12, d30, d31, #2 \n" "vmovl.s8 q15, d30 \n" // r10 "vmovl.s8 q5, d10 \n" // r11 "vmovl.s8 q6, d12 \n" // r12 // sum0 "vmlal.s16 q7, d30, %P10[3] \n" // (r10 - r17) * k03 "vmlal.s16 q8, d31, %P10[3] \n" "vmlal.s16 q9, d10, %P11[0] \n" // (r11 - r18) * k04 "vmlal.s16 q10, d11, %P11[0] \n" "vmlal.s16 q7, d12, %P11[1] \n" // (r12 - r19) * k05 "vmlal.s16 q8, d13, %P11[1] \n" // r2 "vld1.s8 {d30-d31}, [%4] \n" // r2 "add %4, %4, #8 \n" "vext.s8 d10, d30, d31, #1 \n" "vext.s8 d12, d30, d31, #2 \n" "vmovl.s8 q15, d30 \n" // r20 "vmovl.s8 q5, d10 \n" // r21 "vmovl.s8 q6, d12 \n" // r22 // sum0 "vmlal.s16 q7, d30, %P11[2] \n" // (r20 - r27) * k06 "vmlal.s16 q8, d31, %P11[2] \n" "vmlal.s16 q9, d10, %P11[3] \n" // (r21 - r28) * k07 "vmlal.s16 q10, d11, %P11[3] \n" "vmlal.s16 q7, d12, %P12[0] \n" // (r22 - r29) * k08 "vmlal.s16 q8, d13, %P12[0] \n" "subs %0, %0, #1 \n" // add and save "vadd.s32 q7, q7, q9 \n" "vadd.s32 q8, q8, q10 \n" "vst1.s32 {d14-d17}, [%1]! 
\n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k4567), // %11 "w"(_k8xxx) // %12 : "cc", "memory", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * kernel[0]; sum += (int)r0[1] * kernel[1]; sum += (int)r0[2] * kernel[2]; sum += (int)r1[0] * kernel[3]; sum += (int)r1[1] * kernel[4]; sum += (int)r1[2] * kernel[5]; sum += (int)r2[0] * kernel[6]; sum += (int)r2[1] * kernel[7]; sum += (int)r2[2] * kernel[8]; *outptr0 = sum; r0++; r1++; r2++; outptr0++; } r0 += 2; r1 += 2; r2 += 2; } } } static void convdw3x3s2_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); const signed char* kernel = (const signed char*)_kernel + p * 9; int* outptr = out; const signed char* img = bottom_blob.channel(p); const signed char* r0 = img; const signed char* r1 = img + w; const signed char* r2 = img + w * 2; int i = 0; #if __ARM_NEON int8x16_t _k0123456789x = vld1q_s8(kernel); int16x8_t _k_s16 = vmovl_s8(vget_low_s8(_k0123456789x)); int16x8_t _kn_s16 = vmovl_s8(vget_high_s8(_k0123456789x)); int16x4_t _k0123 = vget_low_s16(_k_s16); int16x4_t _k4567 = vget_high_s16(_k_s16); int16x4_t _k8xxx = vget_low_s16(_kn_s16); #endif // __ARM_NEON for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "ld2 {v4.8b, v5.8b}, [%2], #16 \n" "ld2 {v6.8b, v7.8b}, [%2] \n" "ld2 {v8.8b, v9.8b}, [%3], #16 \n" "ld2 {v10.8b, v11.8b}, 
[%3] \n" "ld2 {v12.8b, v13.8b}, [%4], #16 \n" "ld2 {v14.8b, v15.8b}, [%4] \n" "ext v6.8b, v4.8b, v6.8b, #1 \n" "ext v10.8b, v8.8b, v10.8b, #1 \n" "ext v14.8b, v12.8b, v14.8b, #1 \n" "sshll v4.8h, v4.8b, #0 \n" // r00 "sshll v5.8h, v5.8b, #0 \n" // r01 "sshll v6.8h, v6.8b, #0 \n" // r02 "sshll v8.8h, v8.8b, #0 \n" // r10 "sshll v9.8h, v9.8b, #0 \n" // r11 "sshll v10.8h, v10.8b, #0 \n" // r12 "sshll v12.8h, v12.8b, #0 \n" // r20 "sshll v13.8h, v13.8b, #0 \n" // r21 "sshll v14.8h, v14.8b, #0 \n" // r22 // r0 "smull v20.4s, v4.4h, %10.h[0] \n" // (r00 - r07) * k00 "smull2 v21.4s, v4.8h, %10.h[0] \n" "smull v22.4s, v5.4h, %10.h[1] \n" // (r01 - r08) * k01 "smull2 v23.4s, v5.8h, %10.h[1] \n" "smull v24.4s, v6.4h, %10.h[2] \n" // (r02 - r09) * k02 "smull2 v25.4s, v6.8h, %10.h[2] \n" // r1 "smlal v20.4s, v8.4h, %10.h[3] \n" // (r10 - r17) * k03 "smlal2 v21.4s, v8.8h, %10.h[3] \n" "smlal v22.4s, v9.4h, %11.h[0] \n" // (r11 - r18) * k04 "smlal2 v23.4s, v9.8h, %11.h[0] \n" "smlal v24.4s, v10.4h, %11.h[1] \n" // (r12 - r19) * k05 "smlal2 v25.4s, v10.8h, %11.h[1] \n" // r2 "smlal v20.4s, v12.4h, %11.h[2] \n" // (r20 - r27) * k06 "smlal2 v21.4s, v12.8h, %11.h[2] \n" "smlal v22.4s, v13.4h, %11.h[3] \n" // (r21 - r28) * k07 "smlal2 v23.4s, v13.8h, %11.h[3] \n" "smlal v24.4s, v14.4h, %12.h[0] \n" // (r22 - r29) * k08 "smlal2 v25.4s, v14.8h, %12.h[0] \n" // add and save "add v20.4s, v20.4s, v22.4s \n" "add v21.4s, v21.4s, v23.4s \n" "add v20.4s, v20.4s, v24.4s \n" "add v21.4s, v21.4s, v25.4s \n" "st1 {v20.4s, v21.4s}, [%1], #32 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k4567), // %11 "w"(_k8xxx) // %12 : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25"); } #else if (nn > 0) { asm volatile( "0: \n" // r0 "vld2.s8 
{d30-d31}, [%2]! \n" // r0 "vld2.s8 {d10-d11}, [%2] \n" "vext.s8 d12, d30, d10, #1 \n" "vmovl.s8 q5, d31 \n" // r01 "vmovl.s8 q15, d30 \n" // r00 "vmovl.s8 q6, d12 \n" // r02 // sum0 "vmull.s16 q7, d30, %P10[0] \n" // (r00 - r07) * k00 "vmull.s16 q8, d31, %P10[0] \n" "vmull.s16 q9, d10, %P10[1] \n" // (r01 - r08) * k01 "vmull.s16 q10, d11, %P10[1] \n" "vmlal.s16 q7, d12, %P10[2] \n" // (r02 - r09) * k02 "vmlal.s16 q8, d13, %P10[2] \n" // r1 "vld2.s8 {d30-d31}, [%3]! \n" // r1 "vld2.s8 {d10-d11}, [%3] \n" "vext.s8 d12, d30, d10, #1 \n" "vmovl.s8 q5, d31 \n" // r11 "vmovl.s8 q15, d30 \n" // r10 "vmovl.s8 q6, d12 \n" // r12 // sum0 "vmlal.s16 q7, d30, %P10[3] \n" // (r10 - r17) * k03 "vmlal.s16 q8, d31, %P10[3] \n" "vmlal.s16 q9, d10, %P11[0] \n" // (r11 - r18) * k04 "vmlal.s16 q10, d11, %P11[0] \n" "vmlal.s16 q7, d12, %P11[1] \n" // (r12 - r19) * k05 "vmlal.s16 q8, d13, %P11[1] \n" // r2 "vld2.s8 {d30-d31}, [%4]! \n" // r2 "vld2.s8 {d10-d11}, [%4] \n" "vext.s8 d12, d30, d10, #1 \n" "vmovl.s8 q5, d31 \n" // r21 "vmovl.s8 q15, d30 \n" // r20 "vmovl.s8 q6, d12 \n" // r22 // sum0 "vmlal.s16 q7, d30, %P11[2] \n" // (r20 - r27) * k06 "vmlal.s16 q8, d31, %P11[2] \n" "vmlal.s16 q9, d10, %P11[3] \n" // (r21 - r28) * k07 "vmlal.s16 q10, d11, %P11[3] \n" "vmlal.s16 q7, d12, %P12[0] \n" // (r22 - r29) * k08 "vmlal.s16 q8, d13, %P12[0] \n" "subs %0, %0, #1 \n" // add and save "vadd.s32 q7, q7, q9 \n" "vadd.s32 q8, q8, q10 \n" "vst1.s32 {d14-d17}, [%1]! 
\n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k4567), // %11 "w"(_k8xxx) // %12 : "cc", "memory", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * kernel[0]; sum += (int)r0[1] * kernel[1]; sum += (int)r0[2] * kernel[2]; sum += (int)r1[0] * kernel[3]; sum += (int)r1[1] * kernel[4]; sum += (int)r1[2] * kernel[5]; sum += (int)r2[0] * kernel[6]; sum += (int)r2[1] * kernel[7]; sum += (int)r2[2] * kernel[8]; *outptr = sum; r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } } static void convdw3x3s1_int8_requant_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, std::vector<float> scales_requant, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; const float scale_requant_in = scales_requant[2 * p]; const float scale_requant_out = scales_requant[2 * p + 1]; const signed char* kernel = (const signed char*)_kernel + p * 9; signed char* outptr0 = out; signed char* outptr0n = outptr0 + outw; const signed char* img0 = bottom_blob.channel(p); const signed char* r0 = img0; const signed char* r1 = img0 + w; const signed char* r2 = img0 + w * 2; const signed char* r3 = img0 + w * 3; int i = 0; #if __ARM_NEON int8x16_t _k0123456789x = vld1q_s8(kernel); int16x8_t _k_s16 = vmovl_s8(vget_low_s8(_k0123456789x)); int16x8_t _kn_s16 = vmovl_s8(vget_high_s8(_k0123456789x)); int16x4_t _k0123 = vget_low_s16(_k_s16); int16x4_t _k4567 = vget_high_s16(_k_s16); int16x4_t _k8xxx = vget_low_s16(_kn_s16); #endif // __ARM_NEON for (; i + 1 < outh; i += 2) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "ld1 {v4.8b, v5.8b}, [%3] \n" "ld1 {v6.8b, v7.8b}, [%4] \n" "ld1 {v8.8b, v9.8b}, [%5] \n" "ld1 {v10.8b, v11.8b}, [%6] \n" "add %3, %3, #8 \n" "add %4, %4, #8 \n" "add %5, %5, #8 \n" "add %6, %6, #8 \n" "ext v12.8b, v4.8b, v5.8b, #1 \n" "ext v13.8b, v4.8b, v5.8b, #2 \n" "ext v14.8b, v6.8b, v7.8b, #1 \n" "ext v15.8b, v6.8b, v7.8b, #2 \n" "ext v16.8b, v8.8b, v9.8b, #1 \n" "ext v17.8b, v8.8b, v9.8b, #2 \n" "ext v18.8b, v10.8b, v11.8b, #1 \n" "ext v19.8b, v10.8b, v11.8b, #2 \n" "sshll v4.8h, v4.8b, #0 \n" // r00 "sshll v12.8h, v12.8b, #0 \n" // r01 "sshll v13.8h, v13.8b, #0 \n" // r02 "sshll v6.8h, v6.8b, #0 \n" // r10 "sshll v14.8h, v14.8b, #0 \n" // r11 "sshll v15.8h, v15.8b, #0 \n" // r12 "sshll v8.8h, v8.8b, #0 \n" // r20 "sshll v16.8h, v16.8b, #0 \n" // r21 "sshll v17.8h, v17.8b, #0 \n" // r22 "sshll v10.8h, v10.8b, #0 \n" // r30 "sshll v18.8h, v18.8b, #0 \n" // r31 "sshll v19.8h, v19.8b, #0 \n" // r32 // r0 "smull v20.4s, v4.4h, %14.h[0] \n" // (r00 - r07) * k00 "smull2 v21.4s, v4.8h, %14.h[0] \n" 
"smull v22.4s, v12.4h, %14.h[1] \n" // (r01 - r08) * k01 "smull2 v23.4s, v12.8h, %14.h[1] \n" "smull v24.4s, v13.4h, %14.h[2] \n" // (r02 - r09) * k02 "smull2 v25.4s, v13.8h, %14.h[2] \n" // r1 "smull v26.4s, v6.4h, %14.h[0] \n" // (r10 - r17) * k00 "smull2 v27.4s, v6.8h, %14.h[0] \n" "smull v28.4s, v14.4h, %14.h[1] \n" // (r11 - r18) * k01 "smull2 v29.4s, v14.8h, %14.h[1] \n" "smull v30.4s, v15.4h, %14.h[2] \n" // (r12 - r19) * k02 "smull2 v31.4s, v15.8h, %14.h[2] \n" "smlal v20.4s, v6.4h, %14.h[3] \n" // (r10 - r17) * k03 "smlal2 v21.4s, v6.8h, %14.h[3] \n" "smlal v22.4s, v14.4h, %15.h[0] \n" // (r11 - r18) * k04 "smlal2 v23.4s, v14.8h, %15.h[0] \n" "smlal v24.4s, v15.4h, %15.h[1] \n" // (r12 - r19) * k05 "smlal2 v25.4s, v15.8h, %15.h[1] \n" // r2 "smlal v26.4s, v8.4h, %14.h[3] \n" // (r20 - r27) * k03 "smlal2 v27.4s, v8.8h, %14.h[3] \n" "smlal v28.4s, v16.4h, %15.h[0] \n" // (r21 - r28) * k04 "smlal2 v29.4s, v16.8h, %15.h[0] \n" "smlal v30.4s, v17.4h, %15.h[1] \n" // (r22 - r29) * k05 "smlal2 v31.4s, v17.8h, %15.h[1] \n" "smlal v20.4s, v8.4h, %15.h[2] \n" // (r20 - r27) * k06 "smlal2 v21.4s, v8.8h, %15.h[2] \n" "smlal v22.4s, v16.4h, %15.h[3] \n" // (r21 - r28) * k07 "smlal2 v23.4s, v16.8h, %15.h[3] \n" "smlal v24.4s, v17.4h, %16.h[0] \n" // (r22 - r29) * k08 "smlal2 v25.4s, v17.8h, %16.h[0] \n" // r3 "smlal v26.4s, v10.4h, %15.h[2] \n" // (r30 - r37) * k06 "smlal2 v27.4s, v10.8h, %15.h[2] \n" "smlal v28.4s, v18.4h, %15.h[3] \n" // (r31 - r38) * k07 "smlal2 v29.4s, v18.8h, %15.h[3] \n" "smlal v30.4s, v19.4h, %16.h[0] \n" // (r32 - r39) * k08 "smlal2 v31.4s, v19.8h, %16.h[0] \n" // add and save "add v20.4s, v20.4s, v22.4s \n" "add v21.4s, v21.4s, v23.4s \n" "add v26.4s, v26.4s, v28.4s \n" "add v27.4s, v27.4s, v29.4s \n" "add v20.4s, v20.4s, v24.4s \n" "add v21.4s, v21.4s, v25.4s \n" "add v26.4s, v26.4s, v30.4s \n" "add v27.4s, v27.4s, v31.4s \n" "dup v4.4s, %w17 \n" // bias "dup v5.4s, %w18 \n" // scale_in "dup v6.4s, %w19 \n" // scale_out // top_s32 -> top_f32 
"scvtf v20.4s, v20.4s \n" "scvtf v21.4s, v21.4s \n" "scvtf v26.4s, v26.4s \n" "scvtf v27.4s, v27.4s \n" // top_f32 = top_f32 * scale_in "fmul v20.4s, v20.4s, v5.4s \n" "fmul v21.4s, v21.4s, v5.4s \n" "fmul v26.4s, v26.4s, v5.4s \n" "fmul v27.4s, v27.4s, v5.4s \n" // top_f32 = top_f32 + bias "fadd v20.4s, v20.4s, v4.4s \n" "fadd v21.4s, v21.4s, v4.4s \n" "fadd v26.4s, v26.4s, v4.4s \n" "fadd v27.4s, v27.4s, v4.4s \n" // top_f32 = top_f32 * scale_out "fmul v20.4s, v20.4s, v6.4s \n" "fmul v21.4s, v21.4s, v6.4s \n" "fmul v26.4s, v26.4s, v6.4s \n" "fmul v27.4s, v27.4s, v6.4s \n" // top_f32 -> top_s32 "fcvtas v20.4s, v20.4s \n" "fcvtas v21.4s, v21.4s \n" "fcvtas v26.4s, v26.4s \n" "fcvtas v27.4s, v27.4s \n" // top_s32 -> top_s16 "sqxtn v7.4h, v20.4s \n" "sqxtn v9.4h, v26.4s \n" "sqxtn2 v7.8h, v21.4s \n" "sqxtn2 v9.8h, v27.4s \n" // top_s16 -> top_s8 "sqxtn v8.8b, v7.8h \n" "sqxtn v10.8b, v9.8h \n" // save top_s8 "st1 {v8.8b}, [%1], #8 \n" "st1 {v10.8b}, [%2], #8 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr0n), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2), // %5 "=r"(r3) // %6 : "0"(nn), "1"(outptr0), "2"(outptr0n), "3"(r0), "4"(r1), "5"(r2), "6"(r3), "w"(_k0123), // %14 "w"(_k4567), // %15 "w"(_k8xxx), // %16 "r"(bias0), // %17 "r"(scale_requant_in), // %18 "r"(scale_requant_out) // %19 : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } #else if (nn > 0) { asm volatile( "0: \n" // r0 "vld1.s8 {d30-d31}, [%3] \n" // r0 "add %3, %3, #8 \n" "vext.s8 d10, d30, d31, #1 \n" "vext.s8 d12, d30, d31, #2 \n" "vmovl.s8 q15, d30 \n" // r00 "vmovl.s8 q5, d10 \n" // r01 "vmovl.s8 q6, d12 \n" // r02 // sum0 "vmull.s16 q7, d30, %P14[0] \n" // (r00 - r07) * k00 "vmull.s16 q8, d31, %P14[0] \n" "vmull.s16 q9, d10, %P14[1] \n" // (r01 - r08) * k01 "vmull.s16 q10, d11, %P14[1] \n" 
"vmlal.s16 q7, d12, %P14[2] \n" // (r02 - r09) * k02 "vmlal.s16 q8, d13, %P14[2] \n" // r1 "vld1.s8 {d30-d31}, [%4] \n" // r1 "add %4, %4, #8 \n" "vext.s8 d10, d30, d31, #1 \n" "vext.s8 d12, d30, d31, #2 \n" "vmovl.s8 q15, d30 \n" // r10 "vmovl.s8 q5, d10 \n" // r11 "vmovl.s8 q6, d12 \n" // r12 // sum0 "vmlal.s16 q7, d30, %P14[3] \n" // (r10 - r17) * k03 "vmlal.s16 q8, d31, %P14[3] \n" "vmlal.s16 q9, d10, %P15[0] \n" // (r11 - r18) * k04 "vmlal.s16 q10, d11, %P15[0] \n" "vmlal.s16 q7, d12, %P15[1] \n" // (r12 - r19) * k05 "vmlal.s16 q8, d13, %P15[1] \n" // sum1 "vmull.s16 q11, d30, %P14[0] \n" // (r10 - r17) * k00 "vmull.s16 q12, d31, %P14[0] \n" "vmull.s16 q13, d10, %P14[1] \n" // (r11 - r18) * k01 "vmull.s16 q14, d11, %P14[1] \n" "vmlal.s16 q11, d12, %P14[2] \n" // (r12 - r19) * k02 "vmlal.s16 q12, d13, %P14[2] \n" // r2 "vld1.s8 {d30-d31}, [%5] \n" // r2 "add %5, %5, #8 \n" "vext.s8 d10, d30, d31, #1 \n" "vext.s8 d12, d30, d31, #2 \n" "vmovl.s8 q15, d30 \n" // r20 "vmovl.s8 q5, d10 \n" // r21 "vmovl.s8 q6, d12 \n" // r22 // sum0 "vmlal.s16 q7, d30, %P15[2] \n" // (r20 - r27) * k06 "vmlal.s16 q8, d31, %P15[2] \n" "vmlal.s16 q9, d10, %P15[3] \n" // (r21 - r28) * k07 "vmlal.s16 q10, d11, %P15[3] \n" "vmlal.s16 q7, d12, %P16[0] \n" // (r22 - r29) * k08 "vmlal.s16 q8, d13, %P16[0] \n" // sum1 "vmlal.s16 q11, d30, %P14[3] \n" // (r20 - r27) * k03 "vmlal.s16 q12, d31, %P14[3] \n" "vmlal.s16 q13, d10, %P15[0] \n" // (r21 - r28) * k04 "vmlal.s16 q14, d11, %P15[0] \n" "vmlal.s16 q11, d12, %P15[1] \n" // (r22 - r29) * k05 "vmlal.s16 q12, d13, %P15[1] \n" // r3 "vld1.s8 {d30-d31}, [%6] \n" // r3 "add %6, %6, #8 \n" "vext.s8 d10, d30, d31, #1 \n" "vext.s8 d12, d30, d31, #2 \n" "vmovl.s8 q15, d30 \n" // r30 "vmovl.s8 q5, d10 \n" // r31 "vmovl.s8 q6, d12 \n" // r32 // sum1 "vmlal.s16 q11, d30, %P15[2] \n" // (r30 - r37) * k06 "vmlal.s16 q12, d31, %P15[2] \n" "vmlal.s16 q13, d10, %P15[3] \n" // (r31 - r38) * k07 "vmlal.s16 q14, d11, %P15[3] \n" "vmlal.s16 q11, d12, %P16[0] \n" 
// (r32 - r39) * k08 "vmlal.s16 q12, d13, %P16[0] \n" "subs %0, %0, #1 \n" // add and save "vadd.s32 q7, q7, q9 \n" "vadd.s32 q8, q8, q10 \n" "vadd.s32 q11, q11, q13 \n" "vadd.s32 q12, q12, q14 \n" "vdup.f32 q13, %17 \n" // bias "vdup.f32 q14, %18 \n" // scale_in "vdup.f32 q15, %19 \n" // scale_out // top_s32 -> top_f32 "vcvt.f32.s32 q7, q7 \n" "vcvt.f32.s32 q8, q8 \n" // top_f32 = top_f32 * scale_int "vmul.f32 q0, q7, q14 \n" "vmul.f32 q4, q8, q14 \n" // top_f32 = top_f32 + bias "vadd.f32 q0, q0, q13 \n" "vadd.f32 q4, q4, q13 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q0, q15 \n" "vmul.f32 q4, q4, q15 \n" // top_f32 -> top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s16, s16 \n" "vcvtr.s32.f32 s17, s17 \n" "vcvtr.s32.f32 s18, s18 \n" "vcvtr.s32.f32 s19, s19 \n" // top_s32 -> top_s16 "vqmovn.s32 d14, q0 \n" "vqmovn.s32 d15, q4 \n" // top_s16 -> top_s8 "vqmovn.s16 d14, q7 \n" // save top_s8 "vst1.8 {d14}, [%1]! \n" // top_s32 -> top_f32 "vcvt.f32.s32 q11, q11 \n" "vcvt.f32.s32 q12, q12 \n" // top_f32 = top_f32 * scale_int "vmul.f32 q0, q11, q14 \n" "vmul.f32 q4, q12, q14 \n" // top_f32 = top_f32 + bias "vadd.f32 q0, q0, q13 \n" "vadd.f32 q4, q4, q13 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q0, q15 \n" "vmul.f32 q4, q4, q15 \n" // top_f32 -> top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s16, s16 \n" "vcvtr.s32.f32 s17, s17 \n" "vcvtr.s32.f32 s18, s18 \n" "vcvtr.s32.f32 s19, s19 \n" // top_s32 -> top_s16 "vqmovn.s32 d14, q0 \n" "vqmovn.s32 d15, q4 \n" // top_s16 -> top_s8 "vqmovn.s16 d14, q7 \n" // save top_s8 "vst1.8 {d14}, [%2]! 
\n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr0n), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2), // %5 "=r"(r3) // %6 : "0"(nn), "1"(outptr0), "2"(outptr0n), "3"(r0), "4"(r1), "5"(r2), "6"(r3), "w"(_k0123), // %14 "w"(_k4567), // %15 "w"(_k8xxx), // %16 "r"(bias0), // %17 "r"(scale_requant_in), // %18 "r"(scale_requant_out) // %19 : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { // TODO NEON int sum0 = 0; int sum0n = 0; sum0 += (int)r0[0] * kernel[0]; sum0 += (int)r0[1] * kernel[1]; sum0 += (int)r0[2] * kernel[2]; sum0 += (int)r1[0] * kernel[3]; sum0 += (int)r1[1] * kernel[4]; sum0 += (int)r1[2] * kernel[5]; sum0 += (int)r2[0] * kernel[6]; sum0 += (int)r2[1] * kernel[7]; sum0 += (int)r2[2] * kernel[8]; sum0n += (int)r1[0] * kernel[0]; sum0n += (int)r1[1] * kernel[1]; sum0n += (int)r1[2] * kernel[2]; sum0n += (int)r2[0] * kernel[3]; sum0n += (int)r2[1] * kernel[4]; sum0n += (int)r2[2] * kernel[5]; sum0n += (int)r3[0] * kernel[6]; sum0n += (int)r3[1] * kernel[7]; sum0n += (int)r3[2] * kernel[8]; *outptr0 = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out); *outptr0n = float2int8(((float)sum0n * scale_requant_in + bias0) * scale_requant_out); r0++; r1++; r2++; r3++; outptr0++; outptr0n++; } r0 += 2 + w; r1 += 2 + w; r2 += 2 + w; r3 += 2 + w; outptr0 += outw; outptr0n += outw; } for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "dup v26.4s, %w13 \n" "dup v27.4s, %w14 \n" "dup v28.4s, %w15 \n" "0: \n" "ld1 {v4.8b, v5.8b}, [%2] \n" "ld1 {v6.8b, v7.8b}, [%3] \n" "ld1 {v8.8b, v9.8b}, [%4] \n" "add %2, %2, #8 \n" "add %3, %3, #8 \n" "add %4, %4, #8 \n" "ext v12.8b, v4.8b, v5.8b, #1 \n" "ext v13.8b, v4.8b, v5.8b, #2 \n" "ext v14.8b, v6.8b, v7.8b, #1 \n" "ext v15.8b, 
v6.8b, v7.8b, #2 \n" "ext v16.8b, v8.8b, v9.8b, #1 \n" "ext v17.8b, v8.8b, v9.8b, #2 \n" "sshll v4.8h, v4.8b, #0 \n" // r00 "sshll v12.8h, v12.8b, #0 \n" // r01 "sshll v13.8h, v13.8b, #0 \n" // r02 "sshll v6.8h, v6.8b, #0 \n" // r10 "sshll v14.8h, v14.8b, #0 \n" // r11 "sshll v15.8h, v15.8b, #0 \n" // r12 "sshll v8.8h, v8.8b, #0 \n" // r20 "sshll v16.8h, v16.8b, #0 \n" // r21 "sshll v17.8h, v17.8b, #0 \n" // r22 // r0 "smull v20.4s, v4.4h, %10.h[0] \n" // (r00 - r07) * k00 "smull2 v21.4s, v4.8h, %10.h[0] \n" "smull v22.4s, v12.4h, %10.h[1] \n" // (r01 - r08) * k01 "smull2 v23.4s, v12.8h, %10.h[1] \n" "smull v24.4s, v13.4h, %10.h[2] \n" // (r02 - r09) * k02 "smull2 v25.4s, v13.8h, %10.h[2] \n" // r1 "smlal v20.4s, v6.4h, %10.h[3] \n" // (r10 - r17) * k03 "smlal2 v21.4s, v6.8h, %10.h[3] \n" "smlal v22.4s, v14.4h, %11.h[0] \n" // (r11 - r18) * k04 "smlal2 v23.4s, v14.8h, %11.h[0] \n" "smlal v24.4s, v15.4h, %11.h[1] \n" // (r12 - r19) * k05 "smlal2 v25.4s, v15.8h, %11.h[1] \n" // r2 "smlal v20.4s, v8.4h, %11.h[2] \n" // (r20 - r27) * k06 "smlal2 v21.4s, v8.8h, %11.h[2] \n" "smlal v22.4s, v16.4h, %11.h[3] \n" // (r21 - r28) * k07 "smlal2 v23.4s, v16.8h, %11.h[3] \n" "smlal v24.4s, v17.4h, %12.h[0] \n" // (r22 - r29) * k08 "smlal2 v25.4s, v17.8h, %12.h[0] \n" // add and save "add v20.4s, v20.4s, v22.4s \n" "add v21.4s, v21.4s, v23.4s \n" "add v20.4s, v20.4s, v24.4s \n" "add v21.4s, v21.4s, v25.4s \n" // top_s32 -> top_f32 "scvtf v20.4s, v20.4s \n" "scvtf v21.4s, v21.4s \n" // top_f32 = top_f32 * scale_in "fmul v20.4s, v20.4s, v27.4s \n" "fmul v21.4s, v21.4s, v27.4s \n" // top_f32 = top_f32 + bias "fadd v20.4s, v20.4s, v26.4s \n" "fadd v21.4s, v21.4s, v26.4s \n" // top_f32 = top_f32 * scale_out "fmul v20.4s, v20.4s, v28.4s \n" "fmul v21.4s, v21.4s, v28.4s \n" // top_f32 -> top_s32 "fcvtas v20.4s, v20.4s \n" "fcvtas v21.4s, v21.4s \n" // top_s32 -> top_s16 "sqxtn v7.4h, v20.4s \n" "sqxtn2 v7.8h, v21.4s \n" // top_s16 -> top_s8 "sqxtn v8.8b, v7.8h \n" // save top_s8 "st1 
{v8.8b}, [%1], #8 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k4567), // %11 "w"(_k8xxx), // %12 "r"(bias0), // %13 "r"(scale_requant_in), // %14 "r"(scale_requant_out) // %15 : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } #else if (nn > 0) { asm volatile( "0: \n" // r0 "vld1.s8 {d30-d31}, [%2] \n" // r0 "add %2, %2, #8 \n" "vext.s8 d10, d30, d31, #1 \n" "vext.s8 d12, d30, d31, #2 \n" "vmovl.s8 q15, d30 \n" // r00 "vmovl.s8 q5, d10 \n" // r01 "vmovl.s8 q6, d12 \n" // r02 // sum0 "vmull.s16 q7, d30, %P10[0] \n" // (r00 - r07) * k00 "vmull.s16 q8, d31, %P10[0] \n" "vmull.s16 q9, d10, %P10[1] \n" // (r01 - r08) * k01 "vmull.s16 q10, d11, %P10[1] \n" "vmlal.s16 q7, d12, %P10[2] \n" // (r02 - r09) * k02 "vmlal.s16 q8, d13, %P10[2] \n" // r1 "vld1.s8 {d30-d31}, [%3] \n" // r1 "add %3, %3, #8 \n" "vext.s8 d10, d30, d31, #1 \n" "vext.s8 d12, d30, d31, #2 \n" "vmovl.s8 q15, d30 \n" // r10 "vmovl.s8 q5, d10 \n" // r11 "vmovl.s8 q6, d12 \n" // r12 // sum0 "vmlal.s16 q7, d30, %P10[3] \n" // (r10 - r17) * k03 "vmlal.s16 q8, d31, %P10[3] \n" "vmlal.s16 q9, d10, %P11[0] \n" // (r11 - r18) * k04 "vmlal.s16 q10, d11, %P11[0] \n" "vmlal.s16 q7, d12, %P11[1] \n" // (r12 - r19) * k05 "vmlal.s16 q8, d13, %P11[1] \n" // r2 "vld1.s8 {d30-d31}, [%4] \n" // r2 "add %4, %4, #8 \n" "vext.s8 d10, d30, d31, #1 \n" "vext.s8 d12, d30, d31, #2 \n" "vmovl.s8 q15, d30 \n" // r20 "vmovl.s8 q5, d10 \n" // r21 "vmovl.s8 q6, d12 \n" // r22 // sum0 "vmlal.s16 q7, d30, %P11[2] \n" // (r20 - r27) * k06 "vmlal.s16 q8, d31, %P11[2] \n" "vmlal.s16 q9, d10, %P11[3] \n" // (r21 - r28) * k07 "vmlal.s16 q10, d11, %P11[3] \n" "vmlal.s16 q7, d12, %P12[0] \n" // (r22 - r29) * k08 "vmlal.s16 q8, d13, 
%P12[0] \n" "subs %0, %0, #1 \n" // add and save "vadd.s32 q7, q7, q9 \n" "vadd.s32 q8, q8, q10 \n" "vdup.f32 q13, %13 \n" // bias "vdup.f32 q14, %14 \n" // scale_in "vdup.f32 q15, %15 \n" // scale_out // top_s32 -> top_f32 "vcvt.f32.s32 q7, q7 \n" "vcvt.f32.s32 q8, q8 \n" // top_f32 = top_f32 * scale_int "vmul.f32 q0, q7, q14 \n" "vmul.f32 q4, q8, q14 \n" // top_f32 = top_f32 + bias "vadd.f32 q0, q0, q13 \n" "vadd.f32 q4, q4, q13 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q0, q15 \n" "vmul.f32 q4, q4, q15 \n" // top_f32 -> top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s16, s16 \n" "vcvtr.s32.f32 s17, s17 \n" "vcvtr.s32.f32 s18, s18 \n" "vcvtr.s32.f32 s19, s19 \n" // top_s32 -> top_s16 "vqmovn.s32 d14, q0 \n" "vqmovn.s32 d15, q4 \n" // top_s16 -> top_s8 "vqmovn.s16 d14, q7 \n" // save top_s8 "vst1.8 {d14}, [%1]! \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k4567), // %11 "w"(_k8xxx), // %12 "r"(bias0), // %13 "r"(scale_requant_in), // %14 "r"(scale_requant_out) // %15 : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * kernel[0]; sum += (int)r0[1] * kernel[1]; sum += (int)r0[2] * kernel[2]; sum += (int)r1[0] * kernel[3]; sum += (int)r1[1] * kernel[4]; sum += (int)r1[2] * kernel[5]; sum += (int)r2[0] * kernel[6]; sum += (int)r2[1] * kernel[7]; sum += (int)r2[2] * kernel[8]; *outptr0 = float2int8(((float)sum * scale_requant_in + bias0) * scale_requant_out); r0++; r1++; r2++; outptr0++; } r0 += 2; r1 += 2; r2 += 2; } } } static void convdw3x3s2_int8_requant_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, std::vector<float> scales_requant, const Option& opt) { 
int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias; const int tailstep = w - 2 * outw + w; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; const float scale_requant_in = scales_requant[2 * p]; const float scale_requant_out = scales_requant[2 * p + 1]; const signed char* kernel = (const signed char*)_kernel + p * 9; signed char* outptr = out; const signed char* img = bottom_blob.channel(p); const signed char* r0 = img; const signed char* r1 = img + w; const signed char* r2 = img + w * 2; int i = 0; #if __ARM_NEON int8x16_t _k0123456789x = vld1q_s8(kernel); int16x8_t _k_s16 = vmovl_s8(vget_low_s8(_k0123456789x)); int16x8_t _kn_s16 = vmovl_s8(vget_high_s8(_k0123456789x)); int16x4_t _k0123 = vget_low_s16(_k_s16); int16x4_t _k4567 = vget_high_s16(_k_s16); int16x4_t _k8xxx = vget_low_s16(_kn_s16); #endif // __ARM_NEON for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "dup v26.4s, %w13 \n" "dup v27.4s, %w14 \n" "dup v28.4s, %w15 \n" "0: \n" "ld2 {v4.8b, v5.8b}, [%2], #16 \n" "ld2 {v6.8b, v7.8b}, [%2] \n" "ld2 {v8.8b, v9.8b}, [%3], #16 \n" "ld2 {v10.8b, v11.8b}, [%3] \n" "ld2 {v12.8b, v13.8b}, [%4], #16 \n" "ld2 {v14.8b, v15.8b}, [%4] \n" "ext v6.8b, v4.8b, v6.8b, #1 \n" "ext v10.8b, v8.8b, v10.8b, #1 \n" "ext v14.8b, v12.8b, v14.8b, #1 \n" "sshll v4.8h, v4.8b, #0 \n" // r00 "sshll v5.8h, v5.8b, #0 \n" // r01 "sshll v6.8h, v6.8b, #0 \n" // r02 "sshll v8.8h, v8.8b, #0 \n" // r10 "sshll v9.8h, v9.8b, #0 \n" // r11 "sshll v10.8h, v10.8b, #0 \n" // r12 "sshll v12.8h, v12.8b, #0 \n" // r20 "sshll v13.8h, v13.8b, #0 \n" // r21 "sshll v14.8h, v14.8b, #0 \n" // r22 // r0 "smull v20.4s, v4.4h, %10.h[0] \n" // (r00 - r07) * k00 "smull2 v21.4s, v4.8h, %10.h[0] \n" "smull 
v22.4s, v5.4h, %10.h[1] \n" // (r01 - r08) * k01 "smull2 v23.4s, v5.8h, %10.h[1] \n" "smull v24.4s, v6.4h, %10.h[2] \n" // (r02 - r09) * k02 "smull2 v25.4s, v6.8h, %10.h[2] \n" // r1 "smlal v20.4s, v8.4h, %10.h[3] \n" // (r10 - r17) * k03 "smlal2 v21.4s, v8.8h, %10.h[3] \n" "smlal v22.4s, v9.4h, %11.h[0] \n" // (r11 - r18) * k04 "smlal2 v23.4s, v9.8h, %11.h[0] \n" "smlal v24.4s, v10.4h, %11.h[1] \n" // (r12 - r19) * k05 "smlal2 v25.4s, v10.8h, %11.h[1] \n" // r2 "smlal v20.4s, v12.4h, %11.h[2] \n" // (r20 - r27) * k06 "smlal2 v21.4s, v12.8h, %11.h[2] \n" "smlal v22.4s, v13.4h, %11.h[3] \n" // (r21 - r28) * k07 "smlal2 v23.4s, v13.8h, %11.h[3] \n" "smlal v24.4s, v14.4h, %12.h[0] \n" // (r22 - r29) * k08 "smlal2 v25.4s, v14.8h, %12.h[0] \n" // add and save "add v20.4s, v20.4s, v22.4s \n" "add v21.4s, v21.4s, v23.4s \n" "add v20.4s, v20.4s, v24.4s \n" "add v21.4s, v21.4s, v25.4s \n" // top_s32 -> top_f32 "scvtf v20.4s, v20.4s \n" "scvtf v21.4s, v21.4s \n" // top_f32 = top_f32 * scale_in "fmul v20.4s, v20.4s, v27.4s \n" "fmul v21.4s, v21.4s, v27.4s \n" // top_f32 = top_f32 + bias "fadd v20.4s, v20.4s, v26.4s \n" "fadd v21.4s, v21.4s, v26.4s \n" // top_f32 = top_f32 * scale_out "fmul v20.4s, v20.4s, v28.4s \n" "fmul v21.4s, v21.4s, v28.4s \n" // top_f32 -> top_s32 "fcvtas v20.4s, v20.4s \n" "fcvtas v21.4s, v21.4s \n" // top_s32 -> top_s16 "sqxtn v7.4h, v20.4s \n" "sqxtn2 v7.8h, v21.4s \n" // top_s16 -> top_s8 "sqxtn v8.8b, v7.8h \n" // save top_s8 "st1 {v8.8b}, [%1], #8 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k4567), // %11 "w"(_k8xxx), // %12 "r"(bias0), // %13 "r"(scale_requant_in), // %14 "r"(scale_requant_out) // %15 : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", 
"v30", "v31"); } #else if (nn > 0) { asm volatile( "0: \n" // r0 "vld2.s8 {d30-d31}, [%2]! \n" // r0 "vld2.s8 {d10-d11}, [%2] \n" "vext.s8 d12, d30, d10, #1 \n" "vmovl.s8 q5, d31 \n" // r01 "vmovl.s8 q15, d30 \n" // r00 "vmovl.s8 q6, d12 \n" // r02 // sum0 "vmull.s16 q7, d30, %P10[0] \n" // (r00 - r07) * k00 "vmull.s16 q8, d31, %P10[0] \n" "vmull.s16 q9, d10, %P10[1] \n" // (r01 - r08) * k01 "vmull.s16 q10, d11, %P10[1] \n" "vmlal.s16 q7, d12, %P10[2] \n" // (r02 - r09) * k02 "vmlal.s16 q8, d13, %P10[2] \n" // r1 "vld2.s8 {d30-d31}, [%3]! \n" // r1 "vld2.s8 {d10-d11}, [%3] \n" "vext.s8 d12, d30, d10, #1 \n" "vmovl.s8 q5, d31 \n" // r11 "vmovl.s8 q15, d30 \n" // r10 "vmovl.s8 q6, d12 \n" // r12 // sum0 "vmlal.s16 q7, d30, %P10[3] \n" // (r10 - r17) * k03 "vmlal.s16 q8, d31, %P10[3] \n" "vmlal.s16 q9, d10, %P11[0] \n" // (r11 - r18) * k04 "vmlal.s16 q10, d11, %P11[0] \n" "vmlal.s16 q7, d12, %P11[1] \n" // (r12 - r19) * k05 "vmlal.s16 q8, d13, %P11[1] \n" // r2 "vld2.s8 {d30-d31}, [%4]! \n" // r2 "vld2.s8 {d10-d11}, [%4] \n" "vext.s8 d12, d30, d10, #1 \n" "vmovl.s8 q5, d31 \n" // r21 "vmovl.s8 q15, d30 \n" // r20 "vmovl.s8 q6, d12 \n" // r22 // sum0 "vmlal.s16 q7, d30, %P11[2] \n" // (r20 - r27) * k06 "vmlal.s16 q8, d31, %P11[2] \n" "vmlal.s16 q9, d10, %P11[3] \n" // (r21 - r28) * k07 "vmlal.s16 q10, d11, %P11[3] \n" "vmlal.s16 q7, d12, %P12[0] \n" // (r22 - r29) * k08 "vmlal.s16 q8, d13, %P12[0] \n" "subs %0, %0, #1 \n" // add and save "vadd.s32 q7, q7, q9 \n" "vadd.s32 q8, q8, q10 \n" "vdup.f32 q11, %13 \n" // bias "vdup.f32 q12, %14 \n" // scale_in "vdup.f32 q13, %15 \n" // scale_out // top_s32 -> top_f32 "vcvt.f32.s32 q7, q7 \n" "vcvt.f32.s32 q8, q8 \n" // top_f32 = top_f32 * scale_int "vmul.f32 q0, q7, q12 \n" "vmul.f32 q4, q8, q12 \n" // top_f32 = top_f32 + bias "vadd.f32 q0, q0, q11 \n" "vadd.f32 q4, q4, q11 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q0, q13 \n" "vmul.f32 q4, q4, q13 \n" // top_f32 -> top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, 
s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s16, s16 \n" "vcvtr.s32.f32 s17, s17 \n" "vcvtr.s32.f32 s18, s18 \n" "vcvtr.s32.f32 s19, s19 \n" // top_s32 -> top_s16 "vqmovn.s32 d14, q0 \n" "vqmovn.s32 d15, q4 \n" // top_s16 -> top_s8 "vqmovn.s16 d14, q7 \n" // save top_s8 "vst1.8 {d14}, [%1]! \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k4567), // %11 "w"(_k8xxx), // %12 "r"(bias0), // %13 "r"(scale_requant_in), // %14 "r"(scale_requant_out) // %15 : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { int sum = 0; sum += (int)r0[0] * kernel[0]; sum += (int)r0[1] * kernel[1]; sum += (int)r0[2] * kernel[2]; sum += (int)r1[0] * kernel[3]; sum += (int)r1[1] * kernel[4]; sum += (int)r1[2] * kernel[5]; sum += (int)r2[0] * kernel[6]; sum += (int)r2[1] * kernel[7]; sum += (int)r2[2] * kernel[8]; *outptr = float2int8(((float)sum * scale_requant_in + bias0) * scale_requant_out); r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } }
/* ==================== sort.ref.c ==================== */
#include <sys/time.h> #include <time.h> #include <stdio.h> static unsigned long long current_time_ns() { #ifdef __MACH__ clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); unsigned long long s = 1000000000ULL * (unsigned long long)mts.tv_sec; return (unsigned long long)mts.tv_nsec + s; #else struct timespec t ={0,0}; clock_gettime(CLOCK_MONOTONIC, &t); unsigned long long s = 1000000000ULL * (unsigned long long)t.tv_sec; return (((unsigned long long)t.tv_nsec)) + s; #endif } /**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation; either version 2 of the License, or */ /* (at your option) any later version. */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. */ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program; if not, write to the Free Software */ /* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /**********************************************************************************************/ /* * Original code from the Cilk project * * Copyright (c) 2000 Massachusetts Institute of Technology * Copyright (c) 2000 Matteo Frigo */ /* * this program uses an algorithm that we call `cilksort'. 
* The algorithm is essentially mergesort: * * cilksort(in[1..n]) = * spawn cilksort(in[1..n/2], tmp[1..n/2]) * spawn cilksort(in[n/2..n], tmp[n/2..n]) * sync * spawn cilkmerge(tmp[1..n/2], tmp[n/2..n], in[1..n]) * * * The procedure cilkmerge does the following: * * cilkmerge(A[1..n], B[1..m], C[1..(n+m)]) = * find the median of A \union B using binary * search. The binary search gives a pair * (ma, mb) such that ma + mb = (n + m)/2 * and all elements in A[1..ma] are smaller than * B[mb..m], and all the B[1..mb] are smaller * than all elements in A[ma..n]. * * spawn cilkmerge(A[1..ma], B[1..mb], C[1..(n+m)/2]) * spawn cilkmerge(A[ma..m], B[mb..n], C[(n+m)/2 .. (n+m)]) * sync * * The algorithm appears for the first time (AFAIK) in S. G. Akl and * N. Santoro, "Optimal Parallel Merging and Sorting Without Memory * Conflicts", IEEE Trans. Comp., Vol. C-36 No. 11, Nov. 1987 . The * paper does not express the algorithm using recursion, but the * idea of finding the median is there. * * For cilksort of n elements, T_1 = O(n log n) and * T_\infty = O(log^3 n). There is a way to shave a * log factor in the critical path (left as homework). 
*/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "bots.h" #include "app-desc.h" ELM *array, *tmp; static unsigned long rand_nxt = 0; static inline unsigned long my_rand(void) { rand_nxt = rand_nxt * 1103515245 + 12345; return rand_nxt; } static inline void my_srand(unsigned long seed) { rand_nxt = seed; } static inline ELM med3(ELM a, ELM b, ELM c) { if (a < b) { if (b < c) { return b; } else { if (a < c) return c; else return a; } } else { if (b > c) { return b; } else { if (a > c) return c; else return a; } } } /* * simple approach for now; a better median-finding * may be preferable */ static inline ELM choose_pivot(ELM *low, ELM *high) { return med3(*low, *high, low[(high - low) / 2]); } static ELM *seqpart(ELM *low, ELM *high) { ELM pivot; ELM h, l; ELM *curr_low = low; ELM *curr_high = high; pivot = choose_pivot(low, high); while (1) { while ((h = *curr_high) > pivot) curr_high--; while ((l = *curr_low) < pivot) curr_low++; if (curr_low >= curr_high) break; *curr_high-- = l; *curr_low++ = h; } /* * I don't know if this is really necessary. * The problem is that the pivot is not always the * first element, and the partition may be trivial. * However, if the partition is trivial, then * *high is the largest element, whence the following * code. 
*/
/* Tail of seqpart() -- the function header is above this chunk.  Returns the
 * partition point; never returns `high` itself, so seqquick() always recurses
 * on a strictly smaller range. */
if (curr_high < high)
     return curr_high;
else
     return curr_high - 1;
}

/* Exchange two ELM values in place. */
#define swap(a, b) \
{ \
  ELM tmp;\
  tmp = a;\
  a = b;\
  b = tmp;\
}

/* Sort the inclusive range [low, high] in place with straight insertion
 * sort; used to finish small subranges below the quicksort cutoff. */
static void insertion_sort(ELM *low, ELM *high)
{
     ELM *p, *q;
     ELM a, b;

     for (q = low + 1; q <= high; ++q) {
          a = q[0];
          /* Shift elements greater than `a` one slot to the right. */
          for (p = q - 1; p >= low && (b = p[0]) > a; p--)
               p[1] = b;
          p[1] = a;
     }
}

/*
 * tail-recursive quicksort, almost unrecognizable :-)
 */
/* Sequential quicksort of the inclusive range [low, high].  Ranges smaller
 * than bots_app_cutoff_value_2 are handed to insertion_sort().  The second
 * recursive call is converted into the loop (tail recursion elimination). */
void seqquick(ELM *low, ELM *high)
{
     ELM *p;

     while (high - low >= bots_app_cutoff_value_2) {
          p = seqpart(low, high);
          seqquick(low, p);
          low = p + 1;
     }

     insertion_sort(low, high);
}

/* Sequentially merge the inclusive sorted ranges [low1, high1] and
 * [low2, high2] into the destination starting at lowdest. */
void seqmerge(ELM *low1, ELM *high1, ELM *low2, ELM *high2,
              ELM *lowdest)
{
     ELM a1, a2;

     /*
      * The following 'if' statement is not necessary
      * for the correctness of the algorithm, and is
      * in fact subsumed by the rest of the function.
      * However, it is a few percent faster.  Here is why.
      *
      * The merging loop below has something like
      *   if (a1 < a2) {
      *        *dest++ = a1;
      *        ++low1;
      *        if (end of array) break;
      *        a1 = *low1;
      *   }
      *
      * Now, a1 is needed immediately in the next iteration
      * and there is no way to mask the latency of the load.
      * A better approach is to load a1 *before* the end-of-array
      * check; the problem is that we may be speculatively
      * loading an element out of range.  While this is
      * probably not a problem in practice, yet I don't feel
      * comfortable with an incorrect algorithm.  Therefore,
      * I use the 'fast' loop on the array (except for the last
      * element) and the 'slow' loop for the rest, saving both
      * performance and correctness.
      */

     /* 'Fast' loop: preload the next element before the bounds check;
      * runs while both ranges have at least one element beyond the
      * current one, so the preload never reads out of range. */
     if (low1 < high1 && low2 < high2) {
          a1 = *low1;
          a2 = *low2;
          for (;;) {
               if (a1 < a2) {
                    *lowdest++ = a1;
                    a1 = *++low1;
                    if (low1 >= high1)
                         break;
               } else {
                    *lowdest++ = a2;
                    a2 = *++low2;
                    if (low2 >= high2)
                         break;
               }
          }
     }
     /* 'Slow' loop: bounds check before each load; drains the remaining
      * overlap of the two ranges. */
     if (low1 <= high1 && low2 <= high2) {
          a1 = *low1;
          a2 = *low2;
          for (;;) {
               if (a1 < a2) {
                    *lowdest++ = a1;
                    ++low1;
                    if (low1 > high1)
                         break;
                    a1 = *low1;
               } else {
                    *lowdest++ = a2;
                    ++low2;
                    if (low2 > high2)
                         break;
                    a2 = *low2;
               }
          }
     }
     /* Exactly one range is exhausted; bulk-copy whatever is left of the
      * other one (inclusive count: high - low + 1). */
     if (low1 > high1) {
          memcpy(lowdest, low2, sizeof(ELM) * (high2 - low2 + 1));
     } else {
          memcpy(lowdest, low1, sizeof(ELM) * (high1 - low1 + 1));
     }
}

/* Exchange two ELM pointers in place. */
#define swap_indices(a, b) \
{ \
  ELM *tmp;\
  tmp = a;\
  a = b;\
  b = tmp;\
}

/* Binary search over the inclusive sorted range [low, high]. */
ELM *binsplit(ELM val, ELM *low, ELM *high)
{
     /*
      * returns index which contains greatest element <= val.  If val is
      * less than all elements, returns low-1
      */
     ELM *mid;

     while (low != high) {
          mid = low + ((high - low + 1) >> 1);
          if (val <= *mid)
               high = mid - 1;
          else
               low = mid;
     }

     if (*low > val)
          return low - 1;
     else
          return low;
}

/* Parallel merge of the inclusive sorted ranges [low1, high1] and
 * [low2, high2] into lowdest, using recursive OpenMP tasks.  Must be
 * called from within an active parallel region (see sort_par()). */
void cilkmerge_par(ELM *low1, ELM *high1, ELM *low2, ELM *high2, ELM *lowdest)
{
     /*
      * Cilkmerge: Merges range [low1, high1] with range [low2, high2]
      * into the range [lowdest, ...]
      */

     ELM *split1, *split2;	/*
				 * where each of the ranges are broken for
				 * recursive merge
				 */
     long int lowsize;		/*
				 * total size of lower halves of two
				 * ranges - 2
				 */

     /*
      * We want to take the middle element (indexed by split1) from the
      * larger of the two arrays.  The following code assumes that split1
      * is taken from range [low1, high1].  So if [low1, high1] is
      * actually the smaller range, we should swap it with [low2, high2]
      */

     if (high2 - low2 > high1 - low1) {
          swap_indices(low1, low2);
          swap_indices(high1, high2);
     }
     if (high2 < low2) {
          /* smaller range is empty */
          /* NOTE(review): copies (high1 - low1) elements, excluding *high1,
           * whereas seqmerge() copies (high - low + 1) for its inclusive
           * ranges -- verify this apparent off-by-one against the original
           * cilksort algorithm before changing it. */
          memcpy(lowdest, low1, sizeof(ELM) * (high1 - low1));
          return;
     }
     /* Small merges are cheaper done sequentially than as tasks. */
     if (high2 - low2 < bots_app_cutoff_value ) {
          seqmerge(low1, high1, low2, high2, lowdest);
          return;
     }
     /*
      * Basic approach: Find the middle element of one range (indexed by
      * split1).  Find where this element would fit in the other range
      * (indexed by split 2).  Then merge the two lower halves and the two
      * upper halves.
      */

     split1 = ((high1 - low1 + 1) / 2) + low1;
     split2 = binsplit(*split1, low2, high2);
     lowsize = split1 - low1 + split2 - low2;

     /*
      * directly put the splitting element into
      * the appropriate location
      */
     *(lowdest + lowsize + 1) = *split1;
     /* The two half-merges write to disjoint destination regions
      * (below and above the splitting element), so they can run as
      * independent tasks. */
#pragma omp task untied
     cilkmerge_par(low1, split1 - 1, low2, split2, lowdest);
#pragma omp task untied
     cilkmerge_par(split1 + 1, high1, split2 + 1, high2,
		   lowdest + lowsize + 2);
#pragma omp taskwait
     ;
     return;
}

/* Parallel mergesort of `size` elements starting at `low`, using `tmp`
 * as scratch space of the same size.  Must be called from within an
 * active parallel region (see sort_par()). */
void cilksort_par(ELM *low, ELM *tmp, long size)
{
     /*
      * divide the input in four parts of the same size (A, B, C, D)
      * Then:
      *   1) recursively sort A, B, C, and D (in parallel)
      *   2) merge A and B into tmp1, and C and D into tmp2 (in parallel)
      *   3) merge tmp1 and tmp2 into the original array
      */
     long quarter = size / 4;
     ELM *A, *B, *C, *D, *tmpA, *tmpB, *tmpC, *tmpD;

     if (size < bots_app_cutoff_value_1 ) {
          /* quicksort when less than 1024 elements
           * (1024 is the default; the cutoff is runtime-configurable) */
          seqquick(low, low + size - 1);
          return;
     }
     A = low;
     tmpA = tmp;
     B = A + quarter;
     tmpB = tmpA + quarter;
     C = B + quarter;
     tmpC = tmpB + quarter;
     D = C + quarter;
     tmpD = tmpC + quarter;

     /* Step 1: sort the four quarters in parallel (D absorbs the
      * remainder when size is not divisible by 4). */
#pragma omp task untied
     cilksort_par(A, tmpA, quarter);
#pragma omp task untied
     cilksort_par(B, tmpB, quarter);
#pragma omp task untied
     cilksort_par(C, tmpC, quarter);
#pragma omp task untied
     cilksort_par(D, tmpD, size - 3 * quarter);
#pragma omp taskwait
     ;
     /* Step 2: merge A+B and C+D into the scratch buffer, in parallel. */
#pragma omp task untied
     cilkmerge_par(A, A + quarter - 1, B, B + quarter - 1, tmpA);
#pragma omp task untied
     cilkmerge_par(C, C + quarter - 1, D, low + size - 1, tmpC);
#pragma omp taskwait
     ;
     /* Step 3: final merge from scratch back into the original array. */
     cilkmerge_par(tmpA, tmpC - 1, tmpC, tmpA + size - 1, A);
}

/* Randomly permute the first bots_arg_size elements of `array` using the
 * benchmark's own my_rand() PRNG (deterministic given my_srand()). */
void scramble_array( ELM *array )
{
     unsigned long i;
     unsigned long j;

     for (i = 0; i < bots_arg_size; ++i) {
          j = my_rand();
          j = j % bots_arg_size;
          swap(array[i], array[j]);
     }
}

/* Seed the PRNG and fill `array` with the identity permutation, so
 * sort_verify() can check array[i] == i after sorting. */
void fill_array( ELM *array )
{
     unsigned long i;

     my_srand(1);
     /* first, fill with integers 0..size-1 (sort_verify expects
      * array[i] == i after sorting) */
     for (i = 0; i < bots_arg_size; ++i) {
          array[i] = i;
     }
}

/* Validate/clamp the benchmark parameters, allocate the data and scratch
 * arrays, and build the scrambled input. */
void sort_init ( void )
{
     /* Checking arguments */
     if (bots_arg_size < 4) {
          bots_message("%s can not be less than 4, using 4 as a parameter.\n", BOTS_APP_DESC_ARG_SIZE );
          bots_arg_size = 4;
     }
     if (bots_app_cutoff_value < 2) {
          bots_message("%s can not be less than 2, using 2 as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF);
          bots_app_cutoff_value = 2;
     }
     else if (bots_app_cutoff_value > bots_arg_size ) {
          bots_message("%s can not be greather than vector size, using %d as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF, bots_arg_size);
          bots_app_cutoff_value = bots_arg_size;
     }
     if (bots_app_cutoff_value_1 > bots_arg_size ) {
          bots_message("%s can not be greather than vector size, using %d as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF_1, bots_arg_size);
          bots_app_cutoff_value_1 = bots_arg_size;
     }
     if (bots_app_cutoff_value_2 > bots_arg_size ) {
          bots_message("%s can not be greather than vector size, using %d as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF_2, bots_arg_size);
          bots_app_cutoff_value_2 = bots_arg_size;
     }
     /* The merge cutoff must not exceed the quicksort cutoff. */
     if (bots_app_cutoff_value_2 > bots_app_cutoff_value_1) {
          bots_message("%s can not be greather than %s, using %d as a parameter.\n",
                       BOTS_APP_DESC_ARG_CUTOFF_2,
                       BOTS_APP_DESC_ARG_CUTOFF_1,
                       bots_app_cutoff_value_1
                       );
          bots_app_cutoff_value_2 = bots_app_cutoff_value_1;
     }
     /* NOTE(review): malloc results are not checked before use -- confirm
      * the benchmark harness guarantees a reasonable bots_arg_size. */
     array = (ELM *) malloc(bots_arg_size * sizeof(ELM));
     tmp = (ELM *) malloc(bots_arg_size * sizeof(ELM));
     fill_array(array);
     scramble_array(array);
}

/* Benchmark entry point: run and time the parallel sort.  A single task
 * is spawned from one thread; cilksort_par() then fans out recursively. */
void sort_par ( void )
{
	bots_message("Computing multisort algorithm (n=%d) ", bots_arg_size);
const unsigned long long full_program_start = current_time_ns();
{
#pragma omp parallel
#pragma omp single nowait
#pragma omp task untied
     cilksort_par(array, tmp, bots_arg_size);
} ;
const unsigned long long full_program_end = current_time_ns();
printf("full_program %llu ns\n", full_program_end - full_program_start);
	bots_message(" completed!\n");
}

/* Check the result: a correct sort restores the identity permutation. */
int sort_verify ( void )
{
     int i, success = 1;

     for (i = 0; i < bots_arg_size; ++i)
          if (array[i] != i)
               success = 0;

     return success ? BOTS_RESULT_SUCCESSFUL : BOTS_RESULT_UNSUCCESSFUL;
}
Tutorial.h
//================================================================================================= /*! // \file blaze/Tutorial.h // \brief Tutorial of the Blaze library // // Copyright (C) 2013 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
*/ //================================================================================================= #ifndef _BLAZE_TUTORIAL_H_ #define _BLAZE_TUTORIAL_H_ //================================================================================================= // // BLAZE TUTORIAL // //================================================================================================= //**Mainpage*************************************************************************************** /*!\mainpage // // \image html blaze300x150.jpg // // This is the API for the \b Blaze high performance C++ math library. It gives a complete // overview of the individual features and sublibraries of \b Blaze. To get a first impression // on \b Blaze, the short \ref getting_started tutorial is a good place to start. Afterwards, // the following long tutorial covers the most important aspects of the \b Blaze math library. // The tabs at the top of the page allow a direct access to the individual modules, namespaces, // classes, and files of the \b Blaze library.\n\n // // \section table_of_content Table of Contents // // <ul> // <li> \ref configuration_and_installation </li> // <li> \ref getting_started </li> // <li> \ref vectors // <ul> // <li> \ref vector_types </li> // <li> \ref vector_operations </li> // </ul> // </li> // <li> \ref matrices // <ul> // <li> \ref matrix_types </li> // <li> \ref matrix_operations </li> // </ul> // </li> // <li> \ref adaptors // <ul> // <li> \ref adaptors_symmetric_matrices </li> // <li> \ref adaptors_hermitian_matrices </li> // <li> \ref adaptors_triangular_matrices </li> // </ul> // </li> // <li> \ref views // <ul> // <li> \ref views_subvectors </li> // <li> \ref views_submatrices </li> // <li> \ref views_rows </li> // <li> \ref views_columns </li> // </ul> // </li> // <li> \ref arithmetic_operations // <ul> // <li> \ref addition </li> // <li> \ref subtraction </li> // <li> \ref scalar_multiplication </li> // <li> \ref vector_vector_multiplication // <ul> // 
<li> \ref componentwise_multiplication </li> // <li> \ref inner_product </li> // <li> \ref outer_product </li> // <li> \ref cross_product </li> // </ul> // </li> // <li> \ref vector_vector_division </li> // <li> \ref matrix_vector_multiplication </li> // <li> \ref matrix_matrix_multiplication </li> // </ul> // </li> // <li> \ref custom_operations </li> // <li> \ref shared_memory_parallelization // <ul> // <li> \ref openmp_parallelization </li> // <li> \ref cpp_threads_parallelization </li> // <li> \ref boost_threads_parallelization </li> // <li> \ref serial_execution </li> // </ul> // </li> // <li> \ref serialization // <ul> // <li> \ref vector_serialization </li> // <li> \ref matrix_serialization </li> // </ul> // </li> // <li> \ref blas_functions </li> // <li> \ref lapack_functions </li> // <li> \ref configuration_files </li> // <li> \ref block_vectors_and_matrices </li> // <li> \ref custom_data_types </li> // <li> \ref error_reporting_customization </li> // <li> \ref intra_statement_optimization </li> // </ul> */ //************************************************************************************************* //**Configuration and Installation***************************************************************** /*!\page configuration_and_installation Configuration and Installation // // Since \b Blaze is a header-only library, setting up the \b Blaze library on a particular system // is a fairly easy two step process. In the following, this two step process is explained in // detail, preceded only by a short summary of the requirements. // // // \n \section requirements Requirements // <hr> // // In order for \b Blaze to work properly, the Boost library must be installed on the system. It // is recommended to use the newest Boost library available, but \b Blaze requires at minimum the // Boost version 1.54.0. If you don't have Boost installed on your system, you can download it for // free from 'http://www.boost.org'. 
// // Additionally, for maximum performance \b Blaze expects you to have a BLAS library installed // (<a href="http://software.intel.com/en-us/articles/intel-mkl/">Intel MKL</a>, // <a href="http://developer.amd.com/libraries/acml/">ACML</a>, // <a href="http://math-atlas.sourceforge.net">Atlas</a>, // <a href="http://www.tacc.utexas.edu/tacc-projects/gotoblas2">Goto</a>, ...). If you don't // have a BLAS library installed on your system, \b Blaze will still work and will not be reduced // in functionality, but performance may be limited. Thus it is strongly recommended to install a // BLAS library. // // Furthermore, for computing the determinant of a dense matrix and for the dense matrix inversion // \b Blaze requires <a href="https://en.wikipedia.org/wiki/LAPACK">LAPACK</a>. When either of // these features is used it is necessary to link the LAPACK library to the final executable. If // no LAPACK library is available the use of these features will result in a linker error. // // // \n \section step_1_installation Step 1: Installation // <hr> // // \subsection step_1_installation_unix Linux/MacOSX User // // The first step is the installation of the header files. Since \b Blaze only consists of header // files, the <tt>./blaze</tt> subdirectory can be simply copied to a standard include directory // (note that this requires root privileges): \code cp -r ./blaze /usr/local/include \endcode // Alternatively, on Unix-based machines (which includes Linux and Mac OS X) the // \c CPLUS_INCLUDE_PATH environment variable can be set. The specified directory will be // searched after any directories specified on the command line with the option \c -I and // before the standard default directories (such as \c /usr/local/include and \c /usr/include). 
// Assuming a user named 'Jon', the environment variable can be set as follows: \code CPLUS_INCLUDE_PATH=/usr/home/jon/blaze export CPLUS_INCLUDE_PATH \endcode // Last but not least, the <tt>./blaze</tt> subdirectory can be explicitly specified on the // command line. The following example demonstrates this by means of the GNU C++ compiler: \code g++ -I/usr/home/jon/blaze -o BlazeTest BlazeTest.cpp \endcode // \n \subsection step_1_installation_windows Windows User // // Windows doesn't have a standard include directory. Therefore the \b Blaze header files can be // copied to any other directory or simply left in the default \b Blaze directory. However, the // chosen include directory has to be explicitly specified as include path. In Visual Studio, // this is done via the project property pages, configuration properties, C/C++, General settings. // Here the additional include directories can be specified. // // // \n \section step_2_configuration Step 2: Configuration // <hr> // // The second step is the configuration and customization of the \b Blaze library. Many aspects of // \b Blaze can be adapted to specific requirements, environments and architectures by customizing // the header files in the <tt>./blaze/config/</tt> subdirectory. Since the default settings are // reasonable for most systems this step can also be skipped. However, in order to achieve maximum // performance a customization of at least the following configuration files is required: // // - <b><tt>./blaze/config/BLAS.h</tt></b>: Via this configuration file \b Blaze can be enabled // to use a third-party BLAS library for several basic linear algebra functions (such as for // instance dense matrix multiplications). In case no BLAS library is used, all linear algebra // functions use the default implementations of the \b Blaze library and therefore BLAS is not a // requirement for the compilation process. However, please note that performance may be limited. 
// - <b><tt>./blaze/config/CacheSize.h</tt></b>: This file contains the hardware specific cache // settings. \b Blaze uses this information to optimize its cache usage. For maximum performance // it is recommended to adapt these setting to a specific target architecture. // - <b><tt>./blaze/config/Thresholds.h</tt></b>: This file contains all thresholds for the // customization of the \b Blaze compute kernels. In order to tune the kernels for a specific // architecture and to maximize performance it can be necessary to adjust the thresholds, // especially for a parallel execution (see \ref shared_memory_parallelization). // // For an overview of other customization options and more details, please see the section // \ref configuration_files. // // \n Next: \ref getting_started */ //************************************************************************************************* //**Getting Started******************************************************************************** /*!\page getting_started Getting Started // // This short tutorial serves the purpose to give a quick overview of the way mathematical // expressions have to be formulated in \b Blaze. Starting with \ref vector_types, the following // long tutorial covers the most important aspects of the \b Blaze math library. // // // \n \section getting_started_vector_example A First Example // // \b Blaze is written such that using mathematical expressions is as close to mathematical // textbooks as possible and therefore as intuitive as possible. In nearly all cases the seemingly // easiest solution is the right solution and most users experience no problems when trying to // use \b Blaze in the most natural way. The following example gives a first impression of the // formulation of a vector addition in \b Blaze: \code #include <iostream> #include <blaze/Math.h> using blaze::StaticVector; using blaze::DynamicVector; // Instantiation of a static 3D column vector. 
The vector is directly initialized as // ( 4 -2 5 ) StaticVector<int,3UL> a{ 4, -2, 5 }; // Instantiation of a dynamic 3D column vector. Via the subscript operator the values are set to // ( 2 5 -3 ) DynamicVector<int> b( 3UL ); b[0] = 2; b[1] = 5; b[2] = -3; // Adding the vectors a and b DynamicVector<int> c = a + b; // Printing the result of the vector addition std::cout << "c =\n" << c << "\n"; \endcode // Note that the entire \b Blaze math library can be included via the \c blaze/Math.h header // file. Alternatively, the entire \b Blaze library, including both the math and the entire // utility module, can be included via the \c blaze/Blaze.h header file. Also note that all // classes and functions of \b Blaze are contained in the blaze namespace.\n\n // // Assuming that this program resides in a source file called \c FirstExample.cpp, it can be // compiled for instance via the GNU C++ compiler: \code g++ -ansi -O3 -DNDEBUG -mavx -o FirstExample FirstExample.cpp \endcode // Note the definition of the \c NDEBUG preprocessor symbol. In order to achieve maximum // performance, it is necessary to compile the program in release mode, which deactivates // all debugging functionality inside \b Blaze. It is also strongly recommended to specify // the available architecture specific instruction set (as for instance the AVX instruction // set, which if available can be activated via the \c -mavx flag). This allows \b Blaze // to optimize computations via vectorization.\n\n // // When running the resulting executable \c FirstExample, the output of the last line of // this small program is \code c = 6 3 2 \endcode // \n \section getting_started_matrix_example An Example Involving Matrices // // Similarly easy and intuitive are expressions involving matrices: \code #include <blaze/Math.h> using namespace blaze; // Instantiating a dynamic 3D column vector DynamicVector<int> x{ 4, -1, 3 }; // Instantiating a dynamic 2x3 row-major matrix, preinitialized with 0. 
Via the function call // operator three values of the matrix are explicitly set to get the matrix // ( 1 0 4 ) // ( 0 -2 0 ) DynamicMatrix<int> A( 2UL, 3UL, 0 ); A(0,0) = 1; A(0,2) = 4; A(1,1) = -2; // Performing a matrix/vector multiplication DynamicVector<int> y = A * x; // Printing the resulting vector std::cout << "y =\n" << y << "\n"; // Instantiating a static column-major matrix. The matrix is directly initialized as // ( 3 -1 ) // ( 0 2 ) // ( -1 0 ) StaticMatrix<int,3UL,2UL,columnMajor> B{ { 3, -1 }, { 0, 2 }, { -1, 0 } }; // Performing a matrix/matrix multiplication DynamicMatrix<int> C = A * B; // Printing the resulting matrix std::cout << "C =\n" << C << "\n"; \endcode // The output of this program is \code y = 16 2 C = ( -1 -1 ) ( 0 4 ) \endcode // \n \section getting_started_complex_example A Complex Example // // The following example is much more sophisticated. It shows the implementation of the Conjugate // Gradient (CG) algorithm (http://en.wikipedia.org/wiki/Conjugate_gradient) by means of the // \b Blaze library: // // \image html cg.jpg // // In this example it is not important to understand the CG algorithm itself, but to see the // advantage of the API of the \b Blaze library. In the \b Blaze implementation we will use a // sparse matrix/dense vector multiplication for a 2D Poisson equation using \f$ N \times N \f$ // unknowns. It becomes apparent that the core of the algorithm is very close to the mathematical // formulation and therefore has huge advantages in terms of readability and maintainability, // while the performance of the code is close to the expected theoretical peak performance: \code const size_t NN( N*N ); blaze::CompressedMatrix<double,rowMajor> A( NN, NN ); blaze::DynamicVector<double,columnVector> x( NN, 1.0 ), b( NN, 0.0 ), r( NN ), p( NN ), Ap( NN ); double alpha, beta, delta; // ... 
Initializing the sparse matrix A // Performing the CG algorithm r = b - A * x; p = r; delta = (r,r); for( size_t iteration=0UL; iteration<iterations; ++iteration ) { Ap = A * p; alpha = delta / (p,Ap); x += alpha * p; r -= alpha * Ap; beta = (r,r); if( std::sqrt( beta ) < 1E-8 ) break; p = r + ( beta / delta ) * p; delta = beta; } \endcode // \n Hopefully this short tutorial gives a good first impression of how mathematical expressions // are formulated with \b Blaze. The following long tutorial, starting with \ref vector_types, // will cover all aspects of the \b Blaze math library, i.e. it will introduce all vector and // matrix types, all possible operations on vectors and matrices, and of course all possible // mathematical expressions. // // \n Previous: \ref configuration_and_installation &nbsp; &nbsp; Next: \ref vectors */ //************************************************************************************************* //**Vectors**************************************************************************************** /*!\page vectors Vectors // // \tableofcontents // // // \n \section vectors_general General Concepts // <hr> // // The \b Blaze library currently offers four dense vector types (\ref vector_types_static_vector, // \ref vector_types_dynamic_vector, \ref vector_types_hybrid_vector, and \ref vector_types_custom_vector) // and one sparse vector type (\ref vector_types_compressed_vector). 
All vectors can be specified // as either column vectors or row vectors: \code using blaze::DynamicVector; using blaze::columnVector; using blaze::rowVector; // Setup of the 3-dimensional dense column vector // // ( 1 ) // ( 2 ) // ( 3 ) // DynamicVector<int,columnVector> a{ 1, 2, 3 }; // Setup of the 3-dimensional dense row vector // // ( 4 5 6 ) // DynamicVector<int,rowVector> b{ 4, 5, 6 }; \endcode // Per default, all vectors in \b Blaze are column vectors: \code // Instantiation of a 3-dimensional column vector blaze::DynamicVector<int> c( 3UL ); \endcode // \n \section vectors_details Vector Details // <hr> // // - \ref vector_types // - \ref vector_operations // // // \n \section vectors_examples Examples // <hr> \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::rowVector; using blaze::columnVector; StaticVector<int,6UL> a; // Instantiation of a 6-dimensional static column vector CompressedVector<int,rowVector> b; // Instantiation of a compressed row vector DynamicVector<int,columnVector> c; // Instantiation of a dynamic column vector // ... Resizing and initialization c = a + trans( b ); \endcode // \n Previous: \ref getting_started &nbsp; &nbsp; Next: \ref vector_types */ //************************************************************************************************* //**Vector Types*********************************************************************************** /*!\page vector_types Vector Types // // \tableofcontents // // // \n \section vector_types_static_vector StaticVector // <hr> // // The blaze::StaticVector class template is the representation of a fixed size vector with // statically allocated elements of arbitrary type. 
It can be included via the header file \code #include <blaze/math/StaticVector.h> \endcode // The type of the elements, the number of elements, and the transpose flag of the vector can // be specified via the three template parameters: \code template< typename Type, size_t N, bool TF > class StaticVector; \endcode // - \c Type: specifies the type of the vector elements. StaticVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c N : specifies the total number of vector elements. It is expected that StaticVector is // only used for tiny and small vectors. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. // // The blaze::StaticVector is perfectly suited for small to medium vectors whose size is known at // compile time: \code // Definition of a 3-dimensional integral column vector blaze::StaticVector<int,3UL> a; // Definition of a 4-dimensional single precision column vector blaze::StaticVector<float,4UL,blaze::columnVector> b; // Definition of a 6-dimensional double precision row vector blaze::StaticVector<double,6UL,blaze::rowVector> c; \endcode // \n \section vector_types_dynamic_vector DynamicVector // <hr> // // The blaze::DynamicVector class template is the representation of an arbitrary sized vector // with dynamically allocated elements of arbitrary type. It can be included via the header file \code #include <blaze/math/DynamicVector.h> \endcode // The type of the elements and the transpose flag of the vector can be specified via the two // template parameters: \code template< typename Type, bool TF > class DynamicVector; \endcode // - \c Type: specifies the type of the vector elements. DynamicVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. 
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. // // The blaze::DynamicVector is the default choice for all kinds of dense vectors and the best // choice for medium to large vectors. Its size can be modified at runtime: \code // Definition of a 3-dimensional integral column vector blaze::DynamicVector<int> a( 3UL ); // Definition of a 4-dimensional single precision column vector blaze::DynamicVector<float,blaze::columnVector> b( 4UL ); // Definition of a double precision row vector with size 0 blaze::DynamicVector<double,blaze::rowVector> c; \endcode // \n \section vector_types_hybrid_vector HybridVector // <hr> // // The blaze::HybridVector class template combines the advantages of the blaze::StaticVector and // the blaze::DynamicVector class templates. It represents a fixed size vector with statically // allocated elements, but still can be dynamically resized (within the bounds of the available // memory). It can be included via the header file \code #include <blaze/math/HybridVector.h> \endcode // The type of the elements, the number of elements, and the transpose flag of the vector can // be specified via the three template parameters: \code template< typename Type, size_t N, bool TF > class HybridVector; \endcode // - \c Type: specifies the type of the vector elements. HybridVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c N : specifies the maximum number of vector elements. It is expected that HybridVector // is only used for tiny and small vectors. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. 
//
// The blaze::HybridVector is a suitable choice for small to medium vectors, whose size is not
// known at compile time or not fixed at runtime, but whose maximum size is known at compile
// time:

   \code
   // Definition of a 3-dimensional integral column vector with a maximum size of 6
   blaze::HybridVector<int,6UL> a( 3UL );

   // Definition of a 4-dimensional single precision column vector with a maximum size of 16
   blaze::HybridVector<float,16UL,blaze::columnVector> b( 4UL );

   // Definition of a double precision row vector with size 0 and a maximum size of 6
   blaze::HybridVector<double,6UL,blaze::rowVector> c;
   \endcode

// \n \section vector_types_custom_vector CustomVector
// <hr>
//
// The blaze::CustomVector class template provides the functionality to represent an external
// array of elements of arbitrary type and a fixed size as a native \b Blaze dense vector data
// structure. Thus in contrast to all other dense vector types a custom vector does not perform
// any kind of memory allocation by itself, but it is provided with an existing array of elements
// during construction. A custom vector can therefore be considered an alias to the existing
// array. It can be included via the header file

   \code
   #include <blaze/math/CustomVector.h>
   \endcode

// The type of the elements, the properties of the given array of elements and the transpose
// flag of the vector can be specified via the following four template parameters:

   \code
   template< typename Type, bool AF, bool PF, bool TF >
   class CustomVector;
   \endcode

//  - Type: specifies the type of the vector elements. blaze::CustomVector can be used with
//          any non-cv-qualified, non-reference, non-pointer element type.
//  - AF  : specifies whether the represented, external arrays are properly aligned with
//          respect to the available instruction set (SSE, AVX, ...) or not.
//  - PF  : specifies whether the represented, external arrays are properly padded with
//          respect to the available instruction set (SSE, AVX, ...) or not.
// - TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. // // The blaze::CustomVector is the right choice if any external array needs to be represented as // a \b Blaze dense vector data structure or if a custom memory allocation strategy needs to be // realized: \code using blaze::CustomVector; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of an unmanaged custom column vector for unaligned, unpadded integer arrays typedef CustomVector<int,unaligned,unpadded,columnVector> UnalignedUnpadded; std::vector<int> vec( 7UL ); UnalignedUnpadded a( &vec[0], 7UL ); // Definition of a managed custom column vector for unaligned but padded 'float' arrays typedef CustomVector<float,unaligned,padded,columnVector> UnalignedPadded; UnalignedPadded b( new float[16], 9UL, 16UL, blaze::ArrayDelete() ); // Definition of a managed custom row vector for aligned, unpadded 'double' arrays typedef CustomVector<double,aligned,unpadded,rowVector> AlignedUnpadded; AlignedUnpadded c( blaze::allocate<double>( 7UL ), 7UL, blaze::Deallocate() ); // Definition of a managed custom row vector for aligned, padded 'complex<double>' arrays typedef CustomVector<complex<double>,aligned,padded,columnVector> AlignedPadded; AlignedPadded d( allocate< complex<double> >( 8UL ), 5UL, 8UL, blaze::Deallocate() ); \endcode // In comparison with the remaining \b Blaze dense vector types blaze::CustomVector has several // special characteristics. All of these result from the fact that a custom vector is not // performing any kind of memory allocation, but instead is given an existing array of elements. 
// The following sections discuss all of these characteristics: // // -# <b>\ref vector_types_custom_vector_memory_management</b> // -# <b>\ref vector_types_custom_vector_copy_operations</b> // -# <b>\ref vector_types_custom_vector_alignment</b> // -# <b>\ref vector_types_custom_vector_padding</b> // // \n \subsection vector_types_custom_vector_memory_management Memory Management // // The blaze::CustomVector class template acts as an adaptor for an existing array of elements. As // such it provides everything that is required to use the array just like a native \b Blaze dense // vector data structure. However, this flexibility comes with the price that the user of a custom // vector is responsible for the resource management. // // When constructing a custom vector there are two choices: Either a user manually manages the // array of elements outside the custom vector, or alternatively passes the responsibility for // the memory management to an instance of CustomVector. In the second case the CustomVector // class employs shared ownership between all copies of the custom vector, which reference the // same array. // // The following examples give an impression of several possible types of custom vectors: \code using blaze::CustomVector; using blaze::ArrayDelete; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; using blaze::columnVector; using blaze::rowVector; // Definition of a 3-dimensional custom vector with unaligned, unpadded and externally // managed integer array. Note that the std::vector must be guaranteed to outlive the // custom vector! std::vector<int> vec( 3UL ); CustomVector<int,unaligned,unpadded> a( &vec[0], 3UL ); // Definition of a custom row vector with size 3 for unaligned, unpadded integer arrays. 
// The responsibility for the memory management is passed to the custom vector by // providing a deleter of type 'blaze::ArrayDelete' that is used during the destruction // of the custom vector. CustomVector<int,unaligned,unpadded,rowVector> b( new int[3], 3UL, ArrayDelete() ); // Definition of a custom vector with size 3 and capacity 16 with aligned and padded // integer array. The memory management is passed to the custom vector by providing a // deleter of type 'blaze::Deallocate'. CustomVector<int,aligned,padded> c( allocate<int>( 16UL ), 3UL, 16UL, Deallocate() ); \endcode // It is possible to pass any type of deleter to the constructor. The deleter is only required // to provide a function call operator that can be passed the pointer to the managed array. As // an example the following code snipped shows the implementation of two native \b Blaze deleters // blaze::ArrayDelete and blaze::Deallocate: \code namespace blaze { struct ArrayDelete { template< typename Type > inline void operator()( Type ptr ) const { boost::checked_array_delete( ptr ); } }; struct Deallocate { template< typename Type > inline void operator()( Type ptr ) const { deallocate( ptr ); } }; } // namespace blaze \endcode // \n \subsection vector_types_custom_vector_copy_operations Copy Operations // // As with all dense vectors it is possible to copy construct a custom vector: \code using blaze::CustomVector; using blaze::unaligned; using blaze::unpadded; typedef CustomVector<int,unaligned,unpadded> CustomType; std::vector<int> vec( 5UL, 10 ); // Vector of 5 integers of the value 10 CustomType a( &vec[0], 5UL ); // Represent the std::vector as Blaze dense vector a[1] = 20; // Also modifies the std::vector CustomType b( a ); // Creating a copy of vector a b[2] = 20; // Also affect vector a and the std::vector \endcode // It is important to note that a custom vector acts as a reference to the specified array. 
Thus // the result of the copy constructor is a new custom vector that is referencing and representing // the same array as the original custom vector. In case a deleter has been provided to the first // custom vector, both vectors share the responsibility to destroy the array when the last vector // goes out of scope. // // In contrast to copy construction, just as with references, copy assignment does not change // which array is referenced by the custom vector, but modifies the values of the array: \code std::vector<int> vec2( 5UL, 4 ); // Vector of 5 integers of the value 4 CustomType c( &vec2[0], 5UL ); // Represent the std::vector as Blaze dense vector a = c; // Copy assignment: Set all values of vector a and b to 4. \endcode // \n \subsection vector_types_custom_vector_alignment Alignment // // In case the custom vector is specified as \c aligned the passed array must be guaranteed to // be aligned according to the requirements of the used instruction set (SSE, AVX, ...). For // instance, if AVX is active an array of integers must be 32-byte aligned: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::aligned; using blaze::unpadded; int* array = blaze::allocate<int>( 5UL ); // Needs to be 32-byte aligned CustomVector<int,aligned,unpadded> a( array, 5UL, Deallocate() ); \endcode // In case the alignment requirements are violated, a \c std::invalid_argument exception is // thrown. // // \n \subsection vector_types_custom_vector_padding Padding // // Adding padding elements to the end of an array can have a significant impact on the performance.
// For instance, assuming that AVX is available, then two aligned, padded, 3-dimensional vectors // of double precision values can be added via a single SIMD addition operation: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::padded; typedef CustomVector<double,aligned,padded> CustomType; // Creating padded custom vectors of size 3 and a capacity of 4 CustomType a( allocate<double>( 4UL ), 3UL, 4UL, Deallocate() ); CustomType b( allocate<double>( 4UL ), 3UL, 4UL, Deallocate() ); CustomType c( allocate<double>( 4UL ), 3UL, 4UL, Deallocate() ); // ... Initialization c = a + b; // AVX-based vector addition \endcode // In this example, maximum performance is possible. However, in case no padding elements are // inserted, a scalar addition has to be used: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; typedef CustomVector<double,aligned,unpadded> CustomType; // Creating unpadded custom vector of size 3 CustomType a( allocate<double>( 3UL ), 3UL, Deallocate() ); CustomType b( allocate<double>( 3UL ), 3UL, Deallocate() ); CustomType c( allocate<double>( 3UL ), 3UL, Deallocate() ); // ... Initialization c = a + b; // Scalar vector addition \endcode // Note the different number of constructor parameters for unpadded and padded custom vectors: // In contrast to unpadded vectors, where during the construction only the size of the array // has to be specified, during the construction of a padded custom vector it is additionally // necessary to explicitly specify the capacity of the array. // // The number of padding elements is required to be sufficient with respect to the available // instruction set: In case of an aligned padded custom vector the added padding elements must // guarantee that the capacity is a multiple of the SIMD vector width. 
In case of unaligned // padded vectors \f$ N-1 \f$ additional padding elements are required, where \f$ N \f$ is // the SIMD vector width. In case the padding is insufficient with respect to the available // instruction set, a \c std::invalid_argument exception is thrown. // // Please also note that \b Blaze will zero initialize the padding elements in order to achieve // maximum performance! // // // \n \section vector_types_compressed_vector CompressedVector // <hr> // // The blaze::CompressedVector class is the representation of an arbitrarily sized sparse // vector, which stores only non-zero elements of arbitrary type. It can be included via the // header file \code #include <blaze/math/CompressedVector.h> \endcode // The type of the elements and the transpose flag of the vector can be specified via the two // template parameters: \code template< typename Type, bool TF > class CompressedVector; \endcode // - \c Type: specifies the type of the vector elements. CompressedVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. 
// // The blaze::CompressedVector is the right choice for all kinds of sparse vectors: \code // Definition of a 3-dimensional integral column vector blaze::CompressedVector<int> a( 3UL ); // Definition of a 4-dimensional single precision column vector with capacity for 3 non-zero elements blaze::CompressedVector<float,blaze::columnVector> b( 4UL, 3UL ); // Definition of a double precision row vector with size 0 blaze::CompressedVector<double,blaze::rowVector> c; \endcode // \n Previous: \ref vectors &nbsp; &nbsp; Next: \ref vector_operations */ //************************************************************************************************* //**Vector Operations****************************************************************************** /*!\page vector_operations Vector Operations // // \tableofcontents // // // \n \section vector_operations_constructors Constructors // <hr> // // Instantiating and setting up a vector is very easy and intuitive. However, there are a few // rules to take care of: // - In case the last template parameter (the transpose flag) is omitted, the vector is per // default a column vector. // - The elements of a \c StaticVector or \c HybridVector are default initialized (i.e. built-in // data types are initialized to 0, class types are initialized via the default constructor). // - Newly allocated elements of a \c DynamicVector or \c CompressedVector remain uninitialized // if they are of built-in type and are default constructed if they are of class type. // // \n \subsection vector_operations_default_construction Default Construction \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::CompressedVector; // All vectors can be default constructed. Whereas the size // of StaticVectors is fixed via the second template parameter, // the initial size of a default constructed DynamicVector or // CompressedVector is 0. StaticVector<int,2UL> v1; // Instantiation of a 2D integer column vector. 
// All elements are initialized to 0. StaticVector<long,3UL,columnVector> v2; // Instantiation of a 3D long integer column vector. // Again, all elements are initialized to 0L. DynamicVector<float> v3; // Instantiation of a dynamic single precision column // vector of size 0. DynamicVector<double,rowVector> v4; // Instantiation of a dynamic double precision row // vector of size 0. CompressedVector<int> v5; // Instantiation of a compressed integer column // vector of size 0. CompressedVector<double,rowVector> v6; // Instantiation of a compressed double precision row // vector of size 0. \endcode // \n \subsection vector_operations_size_construction Construction with Specific Size // // The \c DynamicVector, \c HybridVector and \c CompressedVector classes offer a constructor that // allows to immediately give the vector the required size. Whereas both dense vectors (i.e. // \c DynamicVector and \c HybridVector) use this information to allocate memory for all vector // elements, \c CompressedVector merely acquires the size but remains empty. \code DynamicVector<int,columnVector> v7( 9UL ); // Instantiation of an integer dynamic column vector // of size 9. The elements are NOT initialized! HybridVector< complex<float>, 5UL > v8( 2UL ); // Instantiation of a column vector with two single // precision complex values. The elements are // default constructed. CompressedVector<int,rowVector> v9( 10UL ); // Instantiation of a compressed row vector with // size 10. Initially, the vector provides no // capacity for non-zero elements. \endcode // \n \subsection vector_operations_initialization_constructors Initialization Constructors // // All dense vector classes offer a constructor that allows for a direct, homogeneous initialization // of all vector elements. In contrast, for sparse vectors the predicted number of non-zero elements // can be specified \code StaticVector<int,3UL,rowVector> v10( 2 ); // Instantiation of a 3D integer row vector. 
// All elements are initialized to 2. DynamicVector<float> v11( 3UL, 7.0F ); // Instantiation of a dynamic single precision // column vector of size 3. All elements are // set to 7.0F. CompressedVector<float,rowVector> v12( 15UL, 3UL ); // Instantiation of a single precision column // vector of size 15, which provides enough // space for at least 3 non-zero elements. \endcode // \n \subsection vector_operations_array_construction Array Construction // // Alternatively, all dense vector classes offer a constructor for an initialization with a dynamic // or static array. If the vector is initialized from a dynamic array, the constructor expects the // actual size of the array as first argument, the array as second argument. In case of a static // array, the fixed size of the array is used: \code const unique_ptr<double[]> array1( new double[2] ); // ... Initialization of the dynamic array blaze::StaticVector<double,2UL> v13( 2UL, array1.get() ); int array2[4] = { 4, -5, -6, 7 }; blaze::StaticVector<int,4UL> v14( array2 ); \endcode // \n \subsection vector_operations_initializer_list_construction Initializer List Construction // // In addition, all dense vector classes can be directly initialized by means of an initializer // list: \code blaze::DynamicVector<float> v15{ 1.0F, 2.0F, 3.0F, 4.0F }; \endcode // \n \subsection vector_operations_copy_construction Copy Construction // // All dense and sparse vectors can be created as the copy of any other dense or sparse vector // with the same transpose flag (i.e. blaze::rowVector or blaze::columnVector). \code StaticVector<int,9UL,columnVector> v16( v7 ); // Instantiation of the dense column vector v16 // as copy of the dense column vector v7. DynamicVector<int,rowVector> v17( v9 ); // Instantiation of the dense row vector v17 as // copy of the sparse row vector v9. CompressedVector<int,columnVector> v18( v1 ); // Instantiation of the sparse column vector v18 // as copy of the dense column vector v1. 
CompressedVector<float,rowVector> v19( v12 ); // Instantiation of the sparse row vector v19 as // copy of the row vector v12. \endcode // Note that it is not possible to create a \c StaticVector as a copy of a vector with a different // size: \code StaticVector<int,5UL,columnVector> v23( v7 ); // Runtime error: Size does not match! StaticVector<int,4UL,rowVector> v24( v10 ); // Compile time error: Size does not match! \endcode // \n \section vector_operations_assignment Assignment // <hr> // // There are several types of assignment to dense and sparse vectors: // \ref vector_operations_homogeneous_assignment, \ref vector_operations_array_assignment, // \ref vector_operations_copy_assignment, and \ref vector_operations_compound_assignment. // // \n \subsection vector_operations_homogeneous_assignment Homogeneous Assignment // // Sometimes it may be necessary to assign the same value to all elements of a dense vector. // For this purpose, the assignment operator can be used: \code blaze::StaticVector<int,3UL> v1; blaze::DynamicVector<double> v2; // Setting all integer elements of the StaticVector to 2 v1 = 2; // Setting all double precision elements of the DynamicVector to 5.0 v2 = 5.0; \endcode // \n \subsection vector_operations_array_assignment Array Assignment // // Dense vectors can also be assigned a static array: \code blaze::StaticVector<float,2UL> v1; blaze::DynamicVector<double,rowVector> v2; float array1[2] = { 1.0F, 2.0F }; double array2[5] = { 2.1, 4.0, -1.7, 8.6, -7.2 }; v1 = array1; v2 = array2; \endcode // \n \subsection vector_operations_initializer_list_assignment Initializer List Assignment // // Alternatively, it is possible to directly assign an initializer list to a dense vector: \code blaze::StaticVector<float,2UL> v1; blaze::DynamicVector<double,rowVector> v2; v1 = { 1.0F, 2.0F }; v2 = { 2.1, 4.0, -1.7, 8.6, -7.2 }; \endcode // \n \subsection vector_operations_copy_assignment Copy Assignment // // For all vector types it is generally possible 
to assign another vector with the same transpose // flag (i.e. blaze::columnVector or blaze::rowVector). Note that in case of \c StaticVectors, the // assigned vector is required to have the same size as the \c StaticVector since the size of a // \c StaticVector cannot be adapted! \code blaze::StaticVector<int,3UL,columnVector> v1; blaze::DynamicVector<int,columnVector> v2( 3UL ); blaze::DynamicVector<float,columnVector> v3( 5UL ); blaze::CompressedVector<int,columnVector> v4( 3UL ); blaze::CompressedVector<float,rowVector> v5( 3UL ); // ... Initialization of the vectors v1 = v2; // OK: Assignment of a 3D dense column vector to another 3D dense column vector v1 = v4; // OK: Assignment of a 3D sparse column vector to a 3D dense column vector v1 = v3; // Runtime error: Cannot assign a 5D vector to a 3D static vector v1 = v5; // Compilation error: Cannot assign a row vector to a column vector \endcode // \n \subsection vector_operations_compound_assignment Compound Assignment // // Next to plain assignment, it is also possible to use addition assignment, subtraction // assignment, and multiplication assignment. Note however, that in contrast to plain assignment // the size and the transpose flag of the vectors have to be equal in order to be able to perform a // compound assignment. \code blaze::StaticVector<int,5UL,columnVector> v1; blaze::DynamicVector<int,columnVector> v2( 5UL ); blaze::CompressedVector<float,columnVector> v3( 7UL ); blaze::DynamicVector<float,rowVector> v4( 7UL ); blaze::CompressedVector<float,rowVector> v5( 7UL ); // ...
Initialization of the vectors v1 += v2; // OK: Addition assignment between two column vectors of the same size v1 += v3; // Runtime error: No compound assignment between vectors of different size v1 -= v4; // Compilation error: No compound assignment between vectors of different transpose flag v4 *= v5; // OK: Multiplication assignment between two row vectors of the same size \endcode // \n \section vector_operations_element_access Element Access // <hr> // // The easiest and most intuitive way to access a dense or sparse vector is via the subscript // operator. The indices to access a vector are zero-based: \code blaze::DynamicVector<int> v1( 5UL ); v1[0] = 1; v1[1] = 3; // ... blaze::CompressedVector<float> v2( 5UL ); v2[2] = 7.3F; v2[4] = -1.4F; \endcode // Whereas using the subscript operator on a dense vector only accesses the already existing // element, accessing an element of a sparse vector via the subscript operator potentially // inserts the element into the vector and may therefore be more expensive. Consider the // following example: \code blaze::CompressedVector<int> v1( 10UL ); for( size_t i=0UL; i<v1.size(); ++i ) { ... = v1[i]; } \endcode // Although the compressed vector is only used for read access within the for loop, using the // subscript operator temporarily inserts 10 non-zero elements into the vector. Therefore, all // vectors (sparse as well as dense) offer an alternate way via the \c begin(), \c cbegin(), // \c end(), and \c cend() functions to traverse the currently contained elements by iterators. // In case of non-const vectors, \c begin() and \c end() return an \c Iterator, which allows a // manipulation of the non-zero value, in case of a constant vector or in case \c cbegin() or // \c cend() are used a \c ConstIterator is returned: \code using blaze::CompressedVector; CompressedVector<int> v1( 10UL ); // ... 
Initialization of the vector // Traversing the vector by Iterator for( CompressedVector<int>::Iterator it=v1.begin(); it!=v1.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } // Traversing the vector by ConstIterator for( CompressedVector<int>::ConstIterator it=v1.cbegin(); it!=v1.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } \endcode // Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions: \code for( CompressedVector<int>::Iterator it=begin( v1 ); it!=end( v1 ); ++it ) { // ... } for( CompressedVector<int>::ConstIterator it=cbegin( v1 ); it!=cend( v1 ); ++it ) { // ... } \endcode // \n \section vector_operations_element_insertion Element Insertion // <hr> // // In contrast to dense vectors, that store all elements independent of their value and that // offer direct access to all elements, sparse vectors only store the non-zero elements contained // in the vector. Therefore it is necessary to explicitly add elements to the vector. The first // option to add elements to a sparse vector is the subscript operator: \code using blaze::CompressedVector; CompressedVector<int> v1( 3UL ); v1[1] = 2; \endcode // In case the element at the given index is not yet contained in the vector, it is automatically // inserted. Otherwise the old value is replaced by the new value 2.
The operator returns a // reference to the sparse vector element.\n // An alternative is the \c set() function: In case the element is not yet contained in the vector // the element is inserted, else the element's value is modified: \code // Insert or modify the value at index 3 v1.set( 3, 1 ); \endcode // However, insertion of elements can be better controlled via the \c insert() function. In contrast // to the subscript operator and the \c set() function it emits an exception in case the element is // already contained in the vector. In order to check for this case, the \c find() function can be // used: \code // In case the element at index 4 is not yet contained in the vector it is inserted // with a value of 6. if( v1.find( 4 ) == v1.end() ) v1.insert( 4, 6 ); \endcode // Although the \c insert() function is very flexible, due to performance reasons it is not suited // for the setup of large sparse vectors. A very efficient, yet also very low-level way to fill // a sparse vector is the \c append() function. It requires the sparse vector to provide enough // capacity to insert a new element. Additionally, the index of the new element must be larger // than the index of the previous element. Violating these conditions results in undefined // behavior! \code v1.reserve( 10 ); // Reserving space for 10 non-zero elements v1.append( 5, -2 ); // Appending the element -2 at index 5 v1.append( 6, 4 ); // Appending the element 4 at index 6 // ...
\endcode // \n \section vector_operations_member_functions Member Functions // <hr> // // \subsection vector_operations_size .size() // // Via the \c size() member function, the current size of a dense or sparse vector can be queried: \code // Instantiating a dynamic vector with size 10 blaze::DynamicVector<int> v1( 10UL ); v1.size(); // Returns 10 // Instantiating a compressed vector with size 12 and capacity for 3 non-zero elements blaze::CompressedVector<double> v2( 12UL, 3UL ); v2.size(); // Returns 12 \endcode // Alternatively, the free function \c size() can be used to query the current size of a vector. // In contrast to the member function, the free function can also be used to query the size of // vector expressions: \code size( v1 ); // Returns 10, i.e. has the same effect as the member function size( v2 ); // Returns 12, i.e. has the same effect as the member function blaze::DynamicMatrix<int> A( 15UL, 12UL ); size( A * v2 ); // Returns 15, i.e. the size of the resulting vector \endcode // \n \subsection vector_operations_capacity .capacity() // // Via the \c capacity() (member) function the internal capacity of a dense or sparse vector // can be queried. Note that the capacity of a vector doesn't have to be equal to the size // of a vector. In case of a dense vector the capacity will always be greater or equal than // the size of the vector, in case of a sparse vector the capacity may even be less than // the size. \code v1.capacity(); // Returns at least 10 \endcode // For symmetry reasons, there is also a free function \c capacity() available that can be used // to query the capacity: \code capacity( v1 ); // Returns at least 10, i.e. has the same effect as the member function \endcode // Note, however, that it is not possible to query the capacity of a vector expression: \code capacity( A * v1 ); // Compilation error!
\endcode // \n \subsection vector_operations_nonzeros .nonZeros() // // For both dense and sparse vectors the number of non-zero elements can be determined via the // \c nonZeros() member function. Sparse vectors directly return their number of non-zero // elements, dense vectors traverse their elements and count the number of non-zero elements. \code v1.nonZeros(); // Returns the number of non-zero elements in the dense vector v2.nonZeros(); // Returns the number of non-zero elements in the sparse vector \endcode // There is also a free function \c nonZeros() available to query the current number of non-zero // elements: \code nonZeros( v1 ); // Returns the number of non-zero elements in the dense vector nonZeros( v2 ); // Returns the number of non-zero elements in the sparse vector \endcode // The free \c nonZeros() function can also be used to query the number of non-zero elements in // a vector expression. However, the result is not the exact number of non-zero elements, but // may be a rough estimation: \code nonZeros( A * v1 ); // Estimates the number of non-zero elements in the vector expression \endcode // \n \subsection vector_operations_resize_reserve .resize() / .reserve() // // The size of a \c StaticVector is fixed by the second template parameter and a \c CustomVector // cannot be resized. In contrast, the size of \c DynamicVectors, \c HybridVectors as well as // \c CompressedVectors can be changed via the \c resize() function: \code using blaze::DynamicVector; using blaze::CompressedVector; DynamicVector<int,columnVector> v1; CompressedVector<int,rowVector> v2( 4 ); v2[1] = -2; v2[3] = 11; // Adapting the size of the dynamic and compressed vectors. The (optional) second parameter // specifies whether the existing elements should be preserved. Per default, the existing // elements are not preserved. v1.resize( 5UL ); // Resizing vector v1 to 5 elements. Elements of built-in type remain // uninitialized, elements of class type are default constructed. 
v1.resize( 3UL, false ); // Resizing vector v1 to 3 elements. The old elements are lost, the // new elements are NOT initialized! v2.resize( 8UL, true ); // Resizing vector v2 to 8 elements. The old elements are preserved. v2.resize( 5UL, false ); // Resizing vector v2 to 5 elements. The old elements are lost. \endcode // Note that resizing a vector invalidates all existing views (see e.g. \ref views_subvectors) // on the vector: \code typedef blaze::DynamicVector<int,rowVector> VectorType; typedef blaze::Subvector<VectorType> SubvectorType; VectorType v1( 10UL ); // Creating a dynamic vector of size 10 SubvectorType sv = subvector( v1, 2UL, 5UL ); // Creating a view on the range [2..6] v1.resize( 6UL ); // Resizing the vector invalidates the view \endcode // When the internal capacity of a vector is no longer sufficient, the allocation of a larger // chunk of memory is triggered. In order to avoid frequent reallocations, the \c reserve() // function can be used up front to set the internal capacity: \code blaze::DynamicVector<int> v1; v1.reserve( 100 ); v1.size(); // Returns 0 v1.capacity(); // Returns at least 100 \endcode // Note that the size of the vector remains unchanged, but only the internal capacity is set // according to the specified value! // // // \n \section vector_operations_free_functions Free Functions // <hr> // // \subsection vector_operations_reset_clear reset() / clear() // // In order to reset all elements of a vector, the \c reset() function can be used: \code // Setup of a single precision column vector, whose elements are initialized with 2.0F. blaze::DynamicVector<float> v1( 3UL, 2.0F ); // Resetting all elements to 0.0F. Only the elements are reset, the size of the vector is unchanged. reset( v1 ); // Resetting all elements v1.size(); // Returns 3: size and capacity remain unchanged \endcode // In order to return a vector to its default state (i.e.
the state of a default constructed // vector), the \c clear() function can be used: \code // Setup of a single precision column vector, whose elements are initialized with -1.0F. blaze::DynamicVector<float> v1( 5, -1.0F ); // Resetting the entire vector. clear( v1 ); // Resetting the entire vector v1.size(); // Returns 0: size is reset, but capacity remains unchanged \endcode // Note that resetting or clearing both dense and sparse vectors does not change the capacity // of the vectors. // // // \n \subsection vector_operations_isnan isnan() // // The \c isnan() function provides the means to check a dense or sparse vector for not-a-number // elements: \code blaze::DynamicVector<double> a; // ... Resizing and initialization if( isnan( a ) ) { ... } \endcode \code blaze::CompressedVector<double> a; // ... Resizing and initialization if( isnan( a ) ) { ... } \endcode // If at least one element of the vector is not-a-number, the function returns \c true, otherwise // it returns \c false. Please note that this function only works for vectors with floating point // elements. The attempt to use it for a vector with a non-floating point element type results in // a compile time error. // // // \n \subsection vector_operations_isdefault isDefault() // // The \c isDefault() function returns whether the given dense or sparse vector is in default state: \code blaze::HybridVector<int,20UL> a; // ... Resizing and initialization if( isDefault( a ) ) { ... } \endcode // A vector is in default state if it appears to just have been default constructed. All resizable // vectors (\c HybridVector, \c DynamicVector, or \c CompressedVector) and \c CustomVector are // in default state if their size is equal to zero. A non-resizable vector (\c StaticVector, all // subvectors, rows, and columns) is in default state if all its elements are in default state.
// For instance, in case the vector is instantiated for a built-in integral or floating point data // type, the function returns \c true in case all vector elements are 0 and \c false in case any // vector element is not 0. // // // \n \subsection vector_operations_isUniform isUniform() // // In order to check if all vector elements are identical, the \c isUniform function can be used: \code blaze::DynamicVector<int> a; // ... Resizing and initialization if( isUniform( a ) ) { ... } \endcode // Note that in case of sparse vectors the zero elements are also taken into account! // // // \n \subsection vector_operations_min_max min() / max() // // The \c min() and the \c max() functions return the smallest and largest element of the given // dense or sparse vector, respectively: \code blaze::StaticVector<int,4UL,rowVector> a{ -5, 2, 7, 4 }; blaze::StaticVector<int,4UL,rowVector> b{ -5, 2, -7, -4 }; min( a ); // Returns -5 min( b ); // Returns -7 max( a ); // Returns 7 max( b ); // Returns 2 \endcode // In case the vector currently has a size of 0, both functions return 0. Additionally, in case // a given sparse vector is not completely filled, the zero elements are taken into account. For // example: the following compressed vector has only 2 non-zero elements. However, the minimum // of this vector is 0: \code blaze::CompressedVector<int> c( 4UL, 2UL ); c[0] = 1; c[2] = 3; min( c ); // Returns 0 \endcode // Also note that the \c min() and \c max() functions can be used to compute the smallest and // largest element of a vector expression: \code min( a + b + c ); // Returns -9, i.e. the smallest value of the resulting vector max( a - b - c ); // Returns 11, i.e. the largest value of the resulting vector \endcode // \n \subsection vector_operators_abs abs() // // The \c abs() function can be used to compute the absolute values of each element of a vector.
// For instance, the following computation \code blaze::StaticVector<int,3UL,rowVector> a{ -1, 2, -3 }; blaze::StaticVector<int,3UL,rowVector> b( abs( a ) ); \endcode // results in the vector \f$ b = \left(\begin{array}{*{1}{c}} 1 \\ 2 \\ 3 \\ \end{array}\right)\f$ // \n \subsection vector_operations_rounding_functions floor() / ceil() / trunc() / round() // // The \c floor() \c ceil(), \c trunc(), and \c round() functions can be used to round down/up // each element of a vector, respectively: \code blaze::StaticVector<double,3UL,rowVector> a, b; b = floor( a ); // Rounding down each element of the vector b = ceil ( a ); // Rounding up each element of the vector b = trunc( a ); // Truncating each element of the vector b = round( a ); // Rounding each element of the vector \endcode // \n \subsection vector_operators_conj conj() // // The \c conj() function can be applied on a dense or sparse vector to compute the complex // conjugate of each element of the vector: \code using blaze::StaticVector; typedef std::complex<double> cplx; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Computing the vector of complex conjugates // ( (-2, 1) ) // ( ( 1,-1) ) StaticVector<cplx,2UL> b; b = conj( a ); \endcode // Additionally, vectors can be conjugated in-place via the \c conjugate() function: \code blaze::DynamicVector<cplx> c( 5UL ); conjugate( c ); // In-place conjugate operation. 
c = conj( c ); // Same as above \endcode // \n \subsection vector_operators_real real() // // The \c real() function can be used on a dense or sparse vector to extract the real part of // each element of the vector: \code using blaze::StaticVector; typedef std::complex<double> cplx; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Extracting the real part of each vector element // ( -2 ) // ( 1 ) StaticVector<double,2UL> b; b = real( a ); \endcode // \n \subsection vector_operators_imag imag() // // The \c imag() function can be used on a dense or sparse vector to extract the imaginary part // of each element of the vector: \code using blaze::StaticVector; typedef std::complex<double> cplx; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Extracting the imaginary part of each vector element // ( -1 ) // ( 1 ) StaticVector<double,2UL> b; b = imag( a ); \endcode // \n \subsection vector_operations_sqrt sqrt() / invsqrt() // // Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a // vector can be computed: \code blaze::DynamicVector<double> a, b, c; b = sqrt( a ); // Computes the square root of each element c = invsqrt( a ); // Computes the inverse square root of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_cbrt cbrt() / invcbrt() // // The \c cbrt() and \c invcbrt() functions can be used to compute the (inverse) cubic root // of each element of a vector: \code blaze::HybridVector<double,3UL> a, b, c; b = cbrt( a ); // Computes the cubic root of each element c = invcbrt( a ); // Computes the inverse cubic root of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! 
// // // \n \subsection vector_operations_clip clip() // // The \c clip() function can be used to restrict all elements of a vector to a specific range: \code blaze::DynamicVector<double> a, b; b = clip( a, -1.0, 1.0 ); // Restrict all elements to the range [-1..1] \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_pow pow() // // The \c pow() function can be used to compute the exponential value of each element of a vector: \code blaze::StaticVector<double,3UL> a, b; b = pow( a, 1.2 ); // Computes the exponential value of each element \endcode // \n \subsection vector_operations_exp exp() / exp2() / exp10() // // \c exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of each element of a // vector, respectively: \code blaze::DynamicVector<double> a, b; b = exp( a ); // Computes the base e exponential of each element b = exp2( a ); // Computes the base 2 exponential of each element b = exp10( a ); // Computes the base 10 exponential of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! 
// // // \n \subsection vector_operations_log log() / log2() / log10() // // The \c log(), \c log2() and \c log10() functions can be used to compute the natural, binary // and common logarithm of each element of a vector: \code blaze::StaticVector<double,3UL> a, b; b = log( a ); // Computes the natural logarithm of each element b = log2( a ); // Computes the binary logarithm of each element b = log10( a ); // Computes the common logarithm of each element \endcode // \n \subsection vector_operations_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan() // // The following trigonometric functions are available for both dense and sparse vectors: \code blaze::DynamicVector<double> a, b; b = sin( a ); // Computes the sine of each element of the vector b = cos( a ); // Computes the cosine of each element of the vector b = tan( a ); // Computes the tangent of each element of the vector b = asin( a ); // Computes the inverse sine of each element of the vector b = acos( a ); // Computes the inverse cosine of each element of the vector b = atan( a ); // Computes the inverse tangent of each element of the vector \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! 
// // // \n \subsection vector_operations_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh() // // The following hyperbolic functions are available for both dense and sparse vectors: \code blaze::DynamicVector<double> a, b; b = sinh( a ); // Computes the hyperbolic sine of each element of the vector b = cosh( a ); // Computes the hyperbolic cosine of each element of the vector b = tanh( a ); // Computes the hyperbolic tangent of each element of the vector b = asinh( a ); // Computes the inverse hyperbolic sine of each element of the vector b = acosh( a ); // Computes the inverse hyperbolic cosine of each element of the vector b = atanh( a ); // Computes the inverse hyperbolic tangent of each element of the vector \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_erf erf() / erfc() // // The \c erf() and \c erfc() functions compute the (complementary) error function of each // element of a vector: \code blaze::StaticVector<double,3UL,rowVector> a, b; b = erf( a ); // Computes the error function of each element b = erfc( a ); // Computes the complementary error function of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_foreach forEach() // // Via the \c forEach() function it is possible to execute custom operations on dense and sparse // vectors. For instance, the following example demonstrates a custom square root computation via // a lambda: \code blaze::DynamicVector<double> a, b; b = forEach( a, []( double d ) { return std::sqrt( d ); } ); \endcode // Although the computation can be parallelized it is not vectorized and thus cannot perform at // peak performance. However, it is also possible to create vectorized custom operations. See // \ref custom_operations for a detailed overview of the possibilities of custom operations. 
// // // \n \subsection vector_operations_length length() / sqrLength() // // In order to calculate the length of a vector, both the \c length() and \c sqrLength() function // can be used: \code blaze::StaticVector<float,3UL,rowVector> v{ -1.2F, 2.7F, -2.3F }; const float len = length ( v ); // Computes the current length of the vector const float sqrlen = sqrLength( v ); // Computes the square length of the vector \endcode // Note that both functions can only be used for vectors with built-in or complex element type! // // // \n \subsection vector_operations_vector_transpose trans() // // As already mentioned, vectors can either be column vectors (blaze::columnVector) or row vectors // (blaze::rowVector). A column vector cannot be assigned to a row vector and vice versa. However, // vectors can be transposed via the \c trans() function: \code blaze::DynamicVector<int,columnVector> v1( 4UL ); blaze::CompressedVector<int,rowVector> v2( 4UL ); v1 = v2; // Compilation error: Cannot assign a row vector to a column vector v1 = trans( v2 ); // OK: Transposing the row vector to a column vector and assigning it // to the column vector v1 v2 = trans( v1 ); // OK: Transposing the column vector v1 and assigning it to the row vector v2 v1 += trans( v2 ); // OK: Addition assignment of two column vectors \endcode // \n \subsection vector_operations_conjugate_transpose ctrans() // // It is also possible to compute the conjugate transpose of a vector. 
This operation is available // via the \c ctrans() function: \code blaze::CompressedVector< complex<float>, rowVector > v1( 4UL ); blaze::DynamicVector< complex<float>, columnVector > v2( 4UL ); v1 = ctrans( v2 ); // Compute the conjugate transpose vector \endcode // Note that the \c ctrans() function has the same effect as manually applying the \c conj() and // \c trans() function in any order: \code v1 = trans( conj( v2 ) ); // Computing the conjugate transpose vector v1 = conj( trans( v2 ) ); // Computing the conjugate transpose vector \endcode // \n \subsection vector_operations_normalize normalize() // // The \c normalize() function can be used to scale any non-zero vector to a length of 1. In // case the vector does not contain a single non-zero element (i.e. is a zero vector), the // \c normalize() function returns a zero vector. \code blaze::DynamicVector<float,columnVector> v1( 10UL ); blaze::CompressedVector<double,columnVector> v2( 12UL ); v1 = normalize( v1 ); // Normalizing the dense vector v1 length( v1 ); // Returns 1 (or 0 in case of a zero vector) v1 = normalize( v2 ); // Assigning v1 the normalized vector v2 length( v1 ); // Returns 1 (or 0 in case of a zero vector) \endcode // Note that the \c normalize() function only works for floating point vectors. The attempt to // use it for an integral vector results in a compile time error. 
// // \n \subsection vector_operations_swap swap() // // Via the \c swap() function it is possible to completely swap the contents of two vectors of // the same type: \code blaze::DynamicVector<int,columnVector> v1( 10UL ); blaze::DynamicVector<int,columnVector> v2( 20UL ); swap( v1, v2 ); // Swapping the contents of v1 and v2 \endcode // \n Previous: \ref vector_types &nbsp; &nbsp; Next: \ref matrices */ //************************************************************************************************* //**Matrices*************************************************************************************** /*!\page matrices Matrices // // \tableofcontents // // // \n \section matrices_general General Concepts // <hr> // // The \b Blaze library currently offers four dense matrix types (\ref matrix_types_static_matrix, // \ref matrix_types_dynamic_matrix, \ref matrix_types_hybrid_matrix, and \ref matrix_types_custom_matrix) // and one sparse matrix type (\ref matrix_types_compressed_matrix). All matrices can either be // stored as row-major matrices or column-major matrices: \code using blaze::DynamicMatrix; using blaze::rowMajor; using blaze::columnMajor; // Setup of the 2x3 row-major dense matrix // // ( 1 2 3 ) // ( 4 5 6 ) // DynamicMatrix<int,rowMajor> A{ { 1, 2, 3 }, { 4, 5, 6 } }; // Setup of the 3x2 column-major dense matrix // // ( 1 4 ) // ( 2 5 ) // ( 3 6 ) // DynamicMatrix<int,columnMajor> B{ { 1, 4 }, { 2, 5 }, { 3, 6 } }; \endcode // Per default, all matrices in \b Blaze are row-major matrices: \code // Instantiation of a 3x3 row-major matrix blaze::DynamicMatrix<int> C( 3UL, 3UL ); \endcode // \n \section matrices_details Matrix Details // <hr> // // - \ref matrix_types // - \ref matrix_operations // // // \n \section matrices_examples Examples // <hr> \code using blaze::StaticMatrix; using blaze::DynamicMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; StaticMatrix<double,6UL,20UL> A; // Instantiation of a 6x20 
row-major static matrix CompressedMatrix<double,rowMajor> B; // Instantiation of a row-major compressed matrix DynamicMatrix<double,columnMajor> C; // Instantiation of a column-major dynamic matrix // ... Resizing and initialization C = A * B; \endcode // \n Previous: \ref vector_operations &nbsp; &nbsp; Next: \ref matrix_types */ //************************************************************************************************* //**Matrix Types*********************************************************************************** /*!\page matrix_types Matrix Types // // \tableofcontents // // // \n \section matrix_types_static_matrix StaticMatrix // <hr> // // The blaze::StaticMatrix class template is the representation of a fixed size matrix with // statically allocated elements of arbitrary type. It can be included via the header file \code #include <blaze/math/StaticMatrix.h> \endcode // The type of the elements, the number of rows and columns, and the storage order of the matrix // can be specified via the four template parameters: \code template< typename Type, size_t M, size_t N, bool SO > class StaticMatrix; \endcode // - \c Type: specifies the type of the matrix elements. StaticMatrix can be used with any // non-cv-qualified, non-reference element type. // - \c M : specifies the total number of rows of the matrix. // - \c N : specifies the total number of columns of the matrix. Note that it is expected // that StaticMatrix is only used for tiny and small matrices. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. 
// // The blaze::StaticMatrix is perfectly suited for small to medium matrices whose dimensions are // known at compile time: \code // Definition of a 3x4 integral row-major matrix blaze::StaticMatrix<int,3UL,4UL> A; // Definition of a 4x6 single precision row-major matrix blaze::StaticMatrix<float,4UL,6UL,blaze::rowMajor> B; // Definition of a 6x4 double precision column-major matrix blaze::StaticMatrix<double,6UL,4UL,blaze::columnMajor> C; \endcode // \n \section matrix_types_dynamic_matrix DynamicMatrix // <hr> // // The blaze::DynamicMatrix class template is the representation of an arbitrary sized matrix // with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be included // via the header file \code #include <blaze/math/DynamicMatrix.h> \endcode // The type of the elements and the storage order of the matrix can be specified via the two // template parameters: \code template< typename Type, bool SO > class DynamicMatrix; \endcode // - \c Type: specifies the type of the matrix elements. DynamicMatrix can be used with any // non-cv-qualified, non-reference element type. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::DynamicMatrix is the default choice for all kinds of dense matrices and the best // choice for medium to large matrices. 
The number of rows and columns can be modified at runtime: \code // Definition of a 3x4 integral row-major matrix blaze::DynamicMatrix<int> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix blaze::DynamicMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a double precision column-major matrix with 0 rows and columns blaze::DynamicMatrix<double,blaze::columnMajor> C; \endcode // \n \section matrix_types_hybrid_matrix HybridMatrix // <hr> // // The HybridMatrix class template combines the flexibility of a dynamically sized matrix with // the efficiency and performance of a fixed size matrix. It is implemented as a crossing between // the blaze::StaticMatrix and the blaze::DynamicMatrix class templates: Similar to the static // matrix it uses static stack memory instead of dynamically allocated memory and similar to the // dynamic matrix it can be resized (within the extend of the static memory). It can be included // via the header file \code #include <blaze/math/HybridMatrix.h> \endcode // The type of the elements, the maximum number of rows and columns and the storage order of the // matrix can be specified via the four template parameters: \code template< typename Type, size_t M, size_t N, bool SO > class HybridMatrix; \endcode // - Type: specifies the type of the matrix elements. HybridMatrix can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - M : specifies the maximum number of rows of the matrix. // - N : specifies the maximum number of columns of the matrix. Note that it is expected // that HybridMatrix is only used for tiny and small matrices. // - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. 
// // The blaze::HybridMatrix is a suitable choice for small to medium matrices, whose dimensions // are not known at compile time or not fixed at runtime, but whose maximum dimensions are known // at compile time: \code // Definition of a 3x4 integral row-major matrix with maximum dimensions of 6x8 blaze::HybridMatrix<int,6UL,8UL> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix with maximum dimensions of 12x16 blaze::HybridMatrix<float,12UL,16UL,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a 0x0 double precision column-major matrix and maximum dimensions of 6x6 blaze::HybridMatrix<double,6UL,6UL,blaze::columnMajor> C; \endcode // \n \section matrix_types_custom_matrix CustomMatrix // <hr> // // The blaze::CustomMatrix class template provides the functionality to represent an external // array of elements of arbitrary type and a fixed size as a native \b Blaze dense matrix data // structure. Thus in contrast to all other dense matrix types a custom matrix does not perform // any kind of memory allocation by itself, but it is provided with an existing array of element // during construction. A custom matrix can therefore be considered an alias to the existing // array. It can be included via the header file \code #include <blaze/math/CustomMatrix.h> \endcode // The type of the elements, the properties of the given array of elements and the storage order // of the matrix can be specified via the following four template parameters: \code template< typename Type, bool AF, bool PF, bool SO > class CustomMatrix; \endcode // - Type: specifies the type of the matrix elements. blaze::CustomMatrix can be used with // any non-cv-qualified, non-reference, non-pointer element type. // - AF : specifies whether the represented, external arrays are properly aligned with // respect to the available instruction set (SSE, AVX, ...) or not. 
// - PF : specifies whether the represented, external arrays are properly padded with // respect to the available instruction set (SSE, AVX, ...) or not. // - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::CustomMatrix is the right choice if any external array needs to be represented as // a \b Blaze dense matrix data structure or if a custom memory allocation strategy needs to be // realized: \code using blaze::CustomMatrix; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of an unmanaged 3x4 custom matrix for unaligned, unpadded integer arrays typedef CustomMatrix<int,unaligned,unpadded,rowMajor> UnalignedUnpadded; std::vector<int> vec( 12UL ); UnalignedUnpadded A( &vec[0], 3UL, 4UL ); // Definition of a managed 5x6 custom matrix for unaligned but padded 'float' arrays typedef CustomMatrix<float,unaligned,padded,columnMajor> UnalignedPadded; UnalignedPadded B( new float[40], 5UL, 6UL, 8UL, blaze::ArrayDelete() ); // Definition of a managed 12x13 custom matrix for aligned, unpadded 'double' arrays typedef CustomMatrix<double,aligned,unpadded,rowMajor> AlignedUnpadded; AlignedUnpadded C( blaze::allocate<double>( 192UL ), 12UL, 13UL, 16UL, blaze::Deallocate() ); // Definition of a 7x14 custom matrix for aligned, padded 'complex<double>' arrays typedef CustomMatrix<complex<double>,aligned,padded,columnMajor> AlignedPadded; AlignedPadded D( blaze::allocate< complex<double> >( 112UL ), 7UL, 14UL, 16UL, blaze::Deallocate() ); \endcode // In comparison with the remaining \b Blaze dense matrix types blaze::CustomMatrix has several // special characteristics. All of these result from the fact that a custom matrix is not // performing any kind of memory allocation, but instead is given an existing array of elements. 
// The following sections discuss all of these characteristics: // // -# <b>\ref matrix_types_custom_matrix_memory_management</b> // -# <b>\ref matrix_types_custom_matrix_copy_operations</b> // -# <b>\ref matrix_types_custom_matrix_alignment</b> // -# <b>\ref matrix_types_custom_matrix_padding</b> // // \n \subsection matrix_types_custom_matrix_memory_management Memory Management // // The blaze::CustomMatrix class template acts as an adaptor for an existing array of elements. As // such it provides everything that is required to use the array just like a native \b Blaze dense // matrix data structure. However, this flexibility comes with the price that the user of a custom // matrix is responsible for the resource management. // // When constructing a custom matrix there are two choices: Either a user manually manages the // array of elements outside the custom matrix, or alternatively passes the responsibility for // the memory management to an instance of CustomMatrix. In the second case the CustomMatrix // class employs shared ownership between all copies of the custom matrix, which reference the // same array. // // The following examples give an impression of several possible types of custom matrices: \code using blaze::CustomMatrix; using blaze::ArrayDelete; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x4 custom row-major matrix with unaligned, unpadded and externally // managed integer array. Note that the std::vector must be guaranteed to outlive the // custom matrix! std::vector<int> vec( 12UL ); CustomMatrix<int,unaligned,unpadded> A( &vec[0], 3UL, 4UL ); // Definition of a 3x4 custom row-major matrix for unaligned, unpadded integer arrays. 
// The responsibility for the memory management is passed to the custom matrix by // providing a deleter of type 'blaze::ArrayDelete' that is used during the destruction // of the custom matrix. CustomMatrix<int,unaligned,unpadded,rowMajor> B( new int[12], 3UL, 4UL, ArrayDelete() ); // Definition of a custom 8x12 matrix for an aligned and padded integer array of // capacity 128 (including 8 padding elements per row). The memory management is passed // to the custom matrix by providing a deleter of type 'blaze::Deallocate'. CustomMatrix<int,aligned,padded> C( allocate<int>( 128UL ), 8UL, 12UL, 16UL, Deallocate() ); \endcode // It is possible to pass any type of deleter to the constructor. The deleter is only required // to provide a function call operator that can be passed the pointer to the managed array. As // an example the following code snippet shows the implementation of two native \b Blaze deleters // blaze::ArrayDelete and blaze::Deallocate: \code namespace blaze { struct ArrayDelete { template< typename Type > inline void operator()( Type ptr ) const { boost::checked_array_delete( ptr ); } }; struct Deallocate { template< typename Type > inline void operator()( Type ptr ) const { deallocate( ptr ); } }; } // namespace blaze \endcode // \n \subsection matrix_types_custom_matrix_copy_operations Copy Operations // // As with all dense matrices it is possible to copy construct a custom matrix: \code using blaze::CustomMatrix; using blaze::unaligned; using blaze::unpadded; typedef CustomMatrix<int,unaligned,unpadded> CustomType; std::vector<int> vec( 6UL, 10 ); // Vector of 6 integers of the value 10 CustomType A( &vec[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix A(0,1) = 20; // Also modifies the std::vector CustomType B( A ); // Creating a copy of matrix A B(0,2) = 20; // Also affects matrix A and the std::vector \endcode // It is important to note that a custom matrix acts as a reference to the specified array. 
Thus // the result of the copy constructor is a new custom matrix that is referencing and representing // the same array as the original custom matrix. In case a deleter has been provided to the first // custom matrix, both matrices share the responsibility to destroy the array when the last matrix // goes out of scope. // // In contrast to copy construction, just as with references, copy assignment does not change // which array is referenced by the custom matrices, but modifies the values of the array: \code std::vector<int> vec2( 6UL, 4 ); // Vector of 6 integers of the value 4 CustomType C( &vec2[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix A = C; // Copy assignment: Set all values of matrix A and B to 4. \endcode // \n \subsection matrix_types_custom_matrix_alignment Alignment // // In case the custom matrix is specified as \c aligned the passed array must adhere to some // alignment restrictions based on the alignment requirements of the used data type and the // used instruction set (SSE, AVX, ...). The restriction applies to the first element of each // row/column: In case of a row-major matrix the first element of each row must be properly // aligned, in case of a column-major matrix the first element of each column must be properly // aligned. For instance, if a row-major matrix is used and AVX is active the first element of // each row must be 32-byte aligned: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::aligned; using blaze::padded; using blaze::rowMajor; int* array = blaze::allocate<int>( 40UL ); // Is guaranteed to be 32-byte aligned CustomMatrix<int,aligned,padded,rowMajor> A( array, 5UL, 6UL, 8UL, Deallocate() ); \endcode // In the example, the row-major matrix has six columns. However, since with AVX eight integer // values are loaded together the matrix is padded with two additional elements. This guarantees // that the first element of each row is 32-byte aligned. 
In case the alignment requirements are // violated, a \c std::invalid_argument exception is thrown. // // \n \subsection matrix_types_custom_matrix_padding Padding // // Adding padding elements to the end of each row/column can have a significant impact on the // performance. For instance, assuming that AVX is available, then two aligned, padded, 3x3 double // precision matrices can be added via three SIMD addition operations: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::padded; typedef CustomMatrix<double,aligned,padded> CustomType; // Creating padded custom 3x3 matrix with an additional padding element in each row CustomType A( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() ); CustomType B( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() ); CustomType C( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() ); // ... Initialization C = A + B; // AVX-based matrix addition \endcode // In this example, maximum performance is possible. However, in case no padding elements are // inserted a scalar addition has to be used: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; typedef CustomMatrix<double,aligned,unpadded> CustomType; // Creating unpadded custom 3x3 matrix CustomType A( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() ); CustomType B( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() ); CustomType C( allocate<double>( 12UL ), 3UL, 3UL, 4UL, Deallocate() ); // ... Initialization C = A + B; // Scalar matrix addition \endcode // Note that the construction of padded and unpadded aligned matrices looks identical. However, // in case of padded matrices, \b Blaze will zero initialize the padding element and use them // in all computations in order to achieve maximum performance. 
In case of an unpadded matrix // \b Blaze will ignore the elements with the downside that it is not possible to load a complete // row to an AVX register, which makes it necessary to fall back to a scalar addition. // // The number of padding elements is required to be sufficient with respect to the available // instruction set: In case of an aligned padded custom matrix the added padding elements must // guarantee that the total number of elements in each row/column is a multiple of the SIMD // vector width. In case of an unaligned padded matrix the number of padding elements can be // greater or equal the number of padding elements of an aligned padded custom matrix. In case // the padding is insufficient with respect to the available instruction set, a // \c std::invalid_argument exception is thrown. // // // \n \section matrix_types_compressed_matrix CompressedMatrix // <hr> // // The blaze::CompressedMatrix class template is the representation of an arbitrary sized sparse // matrix with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be // included via the header file \code #include <blaze/math/CompressedMatrix.h> \endcode // The type of the elements and the storage order of the matrix can be specified via the two // template parameters: \code template< typename Type, bool SO > class CompressedMatrix; \endcode // - \c Type: specifies the type of the matrix elements. CompressedMatrix can be used with // any non-cv-qualified, non-reference, non-pointer element type. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. 
// // The blaze::CompressedMatrix is the right choice for all kinds of sparse matrices: \code // Definition of a 3x4 integral row-major matrix blaze::CompressedMatrix<int> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix blaze::CompressedMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a double precision column-major matrix with 0 rows and columns blaze::CompressedMatrix<double,blaze::columnMajor> C; \endcode // \n Previous: \ref matrices &nbsp; &nbsp; Next: \ref matrix_operations */ //************************************************************************************************* //**Matrix Operations****************************************************************************** /*!\page matrix_operations Matrix Operations // // \tableofcontents // // // \n \section matrix_operations_constructors Constructors // <hr> // // Matrices are just as easy and intuitive to create as vectors. Still, there are a few rules // to be aware of: // - In case the last template parameter (the storage order) is omitted, the matrix is per // default stored in row-major order. // - The elements of a \c StaticMatrix or \c HybridMatrix are default initialized (i.e. built-in // data types are initialized to 0, class types are initialized via the default constructor). // - Newly allocated elements of a \c DynamicMatrix or \c CompressedMatrix remain uninitialized // if they are of built-in type and are default constructed if they are of class type. // // \n \subsection matrix_operations_default_construction Default Construction \code using blaze::StaticMatrix; using blaze::DynamicMatrix; using blaze::CompressedMatrix; // All matrices can be default constructed. Whereas the size of // a StaticMatrix is fixed via the second and third template // parameter, the initial size of a constructed DynamicMatrix // or CompressedMatrix is 0. StaticMatrix<int,2UL,2UL> M1; // Instantiation of a 2x2 integer row-major // matrix. All elements are initialized to 0. 
DynamicMatrix<float> M2; // Instantiation of a single precision dynamic // row-major matrix with 0 rows and 0 columns. DynamicMatrix<double,columnMajor> M3; // Instantiation of a double precision dynamic // column-major matrix with 0 rows and 0 columns. CompressedMatrix<int> M4; // Instantiation of a compressed integer // row-major matrix of size 0x0. CompressedMatrix<double,columnMajor> M5; // Instantiation of a compressed double precision // column-major matrix of size 0x0. \endcode // \n \subsection matrix_operations_size_construction Construction with Specific Size // // The \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix classes offer a constructor // that allows to immediately give the matrices a specific number of rows and columns: \code DynamicMatrix<int> M6( 5UL, 4UL ); // Instantiation of a 5x4 dynamic row-major // matrix. The elements are not initialized. HybridMatrix<double,5UL,9UL> M7( 3UL, 7UL ); // Instantiation of a 3x7 hybrid row-major // matrix. The elements are not initialized. CompressedMatrix<float,columnMajor> M8( 8UL, 6UL ); // Instantiation of an empty 8x6 compressed // column-major matrix. \endcode // Note that dense matrices (in this case \c DynamicMatrix and \c HybridMatrix) immediately // allocate enough capacity for all matrix elements. Sparse matrices on the other hand (in this // example \c CompressedMatrix) merely acquire the size, but don't necessarily allocate memory. // // // \n \subsection matrix_operations_initialization_constructors Initialization Constructors // // All dense matrix classes offer a constructor for a direct, homogeneous initialization of all // matrix elements. In contrast, for sparse matrices the predicted number of non-zero elements // can be specified. \code StaticMatrix<int,4UL,3UL,columnMajor> M9( 7 ); // Instantiation of a 4x3 integer column-major // matrix. All elements are initialized to 7. 
DynamicMatrix<float> M10( 2UL, 5UL, 2.0F ); // Instantiation of a 2x5 single precision row-major // matrix. All elements are initialized to 2.0F. CompressedMatrix<int> M11( 3UL, 4UL, 4 ); // Instantiation of a 3x4 integer row-major // matrix with capacity for 4 non-zero elements. \endcode // \n \subsection matrix_operations_array_construction Array Construction // // Alternatively, all dense matrix classes offer a constructor for an initialization with a // dynamic or static array. If the matrix is initialized from a dynamic array, the constructor // expects the dimensions of values provided by the array as first and second argument, the // array as third argument. In case of a static array, the fixed size of the array is used: \code const std::unique_ptr<double[]> array1( new double[6] ); // ... Initialization of the dynamic array blaze::StaticMatrix<double,2UL,3UL> M12( 2UL, 3UL, array1.get() ); int array2[2][2] = { { 4, -5 }, { -6, 7 } }; blaze::StaticMatrix<int,2UL,2UL,rowMajor> M13( array2 ); \endcode // \n \subsection matrix_operations_initializer_list_construction Initializer List Construction // // In addition, all dense matrix classes can be directly initialized by means of an initializer // list: \code blaze::DynamicMatrix<float,columnMajor> M14{ { 3.1F, 6.4F }, { -0.9F, -1.2F }, { 4.8F, 0.6F } }; \endcode // \n \subsection matrix_operations_copy_construction Copy Construction // // All dense and sparse matrices can be created as a copy of another dense or sparse matrix. \code StaticMatrix<int,5UL,4UL,rowMajor> M15( M6 ); // Instantiation of the dense row-major matrix M15 // as copy of the dense row-major matrix M6. DynamicMatrix<float,columnMajor> M16( M8 ); // Instantiation of the dense column-major matrix M16 // as copy of the sparse column-major matrix M8. CompressedMatrix<double,columnMajor> M17( M7 ); // Instantiation of the compressed column-major matrix // M17 as copy of the dense row-major matrix M7. 
CompressedMatrix<float,rowMajor> M18( M8 ); // Instantiation of the compressed row-major matrix // M18 as copy of the compressed column-major matrix M8. \endcode // Note that it is not possible to create a \c StaticMatrix as a copy of a matrix with a different // number of rows and/or columns: \code StaticMatrix<int,4UL,5UL,rowMajor> M19( M6 ); // Runtime error: Number of rows and columns // does not match! StaticMatrix<int,4UL,4UL,columnMajor> M20( M9 ); // Compile time error: Number of columns does // not match! \endcode // \n \section matrix_operations_assignment Assignment // <hr> // // There are several types of assignment to dense and sparse matrices: // \ref matrix_operations_homogeneous_assignment, \ref matrix_operations_array_assignment, // \ref matrix_operations_copy_assignment, and \ref matrix_operations_compound_assignment. // // // \n \subsection matrix_operations_homogeneous_assignment Homogeneous Assignment // // It is possible to assign the same value to all elements of a dense matrix. 
All dense matrix // classes provide an according assignment operator: \code blaze::StaticMatrix<int,3UL,2UL> M1; blaze::DynamicMatrix<double> M2; // Setting all integer elements of the StaticMatrix to 4 M1 = 4; // Setting all double precision elements of the DynamicMatrix to 3.5 M2 = 3.5; \endcode // \n \subsection matrix_operations_array_assignment Array Assignment // // Dense matrices can also be assigned a static array: \code blaze::StaticMatrix<int,2UL,2UL,rowMajor> M1; blaze::StaticMatrix<int,2UL,2UL,columnMajor> M2; blaze::DynamicMatrix<double> M3; int array1[2][2] = { { 1, 2 }, { 3, 4 } }; double array2[3][2] = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } }; M1 = array1; M2 = array1; M3 = array2; \endcode // Note that the dimensions of the static array have to match the size of a \c StaticMatrix, // whereas a \c DynamicMatrix is resized according to the array dimensions: \f$ M3 = \left(\begin{array}{*{2}{c}} 3.1 & 6.4 \\ -0.9 & -1.2 \\ 4.8 & 0.6 \\ \end{array}\right)\f$ // \n \subsection matrix_operations_initializer_list_assignment Initializer List Assignment // // Alternatively, it is possible to directly assign an initializer list to a dense matrix: \code blaze::DynamicMatrix<double> M; M = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } }; \endcode // \n \subsection matrix_operations_copy_assignment Copy Assignment // // All kinds of matrices can be assigned to each other. The only restriction is that since a // \c StaticMatrix cannot change its size, the assigned matrix must match both in the number of // rows and in the number of columns. \code blaze::StaticMatrix<int,3UL,2UL,rowMajor> M1; blaze::DynamicMatrix<int,rowMajor> M2( 3UL, 2UL ); blaze::DynamicMatrix<float,rowMajor> M3( 5UL, 2UL ); blaze::CompressedMatrix<int,rowMajor> M4( 3UL, 2UL ); blaze::CompressedMatrix<float,columnMajor> M5( 3UL, 2UL ); // ... 
Initialization of the matrices M1 = M2; // OK: Assignment of a 3x2 dense row-major matrix to another 3x2 dense row-major matrix M1 = M4; // OK: Assignment of a 3x2 sparse row-major matrix to a 3x2 dense row-major matrix M1 = M3; // Runtime error: Cannot assign a 5x2 matrix to a 3x2 static matrix M1 = M5; // OK: Assignment of a 3x2 sparse column-major matrix to a 3x2 dense row-major matrix \endcode // \n \subsection matrix_operations_compound_assignment Compound Assignment // // Compound assignment is also available for matrices: addition assignment, subtraction assignment, // and multiplication assignment. In contrast to plain assignment, however, the number of rows // and columns of the two operands have to match according to the arithmetic operation. \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> M1; blaze::DynamicMatrix<int,rowMajor> M2( 2UL, 3UL ); blaze::CompressedMatrix<float,columnMajor> M3( 2UL, 3UL ); blaze::CompressedMatrix<float,rowMajor> M4( 2UL, 4UL ); blaze::StaticMatrix<float,2UL,4UL,rowMajor> M5; blaze::CompressedMatrix<float,rowMajor> M6( 3UL, 2UL ); // ... 
Initialization of the matrices M1 += M2; // OK: Addition assignment between two row-major matrices of the same dimensions M1 -= M3; // OK: Subtraction assignment between a row-major and a column-major matrix M1 += M4; // Runtime error: No compound assignment between matrices of different size M1 -= M5; // Compilation error: No compound assignment between matrices of different size M2 *= M6; // OK: Multiplication assignment between two row-major matrices \endcode // Note that the multiplication assignment potentially changes the number of columns of the // target matrix: \f$\left(\begin{array}{*{3}{c}} 2 & 0 & 1 \\ 0 & 3 & 2 \\ \end{array}\right) \times \left(\begin{array}{*{2}{c}} 4 & 0 \\ 1 & 0 \\ 0 & 3 \\ \end{array}\right) = \left(\begin{array}{*{2}{c}} 8 & 3 \\ 3 & 6 \\ \end{array}\right)\f$ // Since a \c StaticMatrix cannot change its size, only a square StaticMatrix can be used in a // multiplication assignment with other square matrices of the same dimensions. // // // \n \section matrix_operations_element_access Element Access // <hr> // // The easiest way to access a specific dense or sparse matrix element is via the function call // operator. The indices to access a matrix are zero-based: \code blaze::DynamicMatrix<int> M1( 4UL, 6UL ); M1(0,0) = 1; M1(0,1) = 3; // ... blaze::CompressedMatrix<double> M2( 5UL, 3UL ); M2(0,2) = 4.1; M2(1,1) = -6.3; \endcode // Since dense matrices allocate enough memory for all contained elements, using the function // call operator on a dense matrix directly returns a reference to the accessed value. In case // of a sparse matrix, if the accessed value is currently not contained in the matrix, the // value is inserted into the matrix prior to returning a reference to the value, which can // be much more expensive than the direct access to a dense matrix. 
Consider the following // example: \code blaze::CompressedMatrix<int> M1( 4UL, 4UL ); for( size_t i=0UL; i<M1.rows(); ++i ) { for( size_t j=0UL; j<M1.columns(); ++j ) { ... = M1(i,j); } } \endcode // Although the compressed matrix is only used for read access within the for loop, using the // function call operator temporarily inserts 16 non-zero elements into the matrix. Therefore, // all matrices (sparse as well as dense) offer an alternate way via the \c begin(), \c cbegin(), // \c end() and \c cend() functions to traverse all contained elements by iterator. Note that // it is not possible to traverse all elements of the matrix, but that it is only possible to // traverse elements in a row/column-wise fashion. In case of a non-const matrix, \c begin() and // \c end() return an \c Iterator, which allows a manipulation of the non-zero value, in case of // a constant matrix or in case \c cbegin() or \c cend() are used a \c ConstIterator is returned: \code using blaze::CompressedMatrix; CompressedMatrix<int,rowMajor> A( 4UL, 6UL ); // Traversing the matrix by Iterator for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::Iterator it=A.begin(i); it!=A.end(i); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } } // Traversing the matrix by ConstIterator for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::ConstIterator it=A.cbegin(i); it!=A.cend(i); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... 
= it->index(); // OK: Read access to the index of the non-zero element. } } \endcode // Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions: \code for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::Iterator it=begin( A, i ); it!=end( A, i ); ++it ) { // ... } } for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::ConstIterator it=cbegin( A, i ); it!=cend( A, i ); ++it ) { // ... } } \endcode // \n \section matrix_operations_element_insertion Element Insertion // <hr> // // Whereas a dense matrix always provides enough capacity to store all matrix elements, a sparse // matrix only stores the non-zero elements. Therefore it is necessary to explicitly add elements // to the matrix. The first possibility to add elements to a sparse matrix is the function call // operator: \code using blaze::CompressedMatrix; CompressedMatrix<int> M1( 3UL, 4UL ); M1(1,2) = 9; \endcode // In case the element at the given position is not yet contained in the sparse matrix, it is // automatically inserted. Otherwise the old value is replaced by the new value 9. The operator // returns a reference to the sparse matrix element.\n // An alternative is the \c set() function: In case the element is not yet contained in the matrix // the element is inserted, else the element's value is modified: \code // Insert or modify the value at position (2,0) M1.set( 2, 0, 1 ); \endcode // However, insertion of elements can be better controlled via the \c insert() function. In // contrast to the function call operator and the \c set() function it emits an exception in case // the element is already contained in the matrix. In order to check for this case, the \c find() // function can be used: \code // In case the element at position (2,3) is not yet contained in the matrix it is inserted // with a value of 4. 
if( M1.find( 2, 3 ) == M1.end( 2 ) ) M1.insert( 2, 3, 4 ); \endcode // Although the \c insert() function is very flexible, due to performance reasons it is not // suited for the setup of large sparse matrices. A very efficient, yet also very low-level // way to fill a sparse matrix is the \c append() function. It requires the sparse matrix to // provide enough capacity to insert a new element in the specified row. Additionally, the // index of the new element must be larger than the index of the previous element in the same // row. Violating these conditions results in undefined behavior! \code M1.reserve( 0, 3 ); // Reserving space for three non-zero elements in row 0 M1.append( 0, 1, 2 ); // Appending the element 2 in row 0 at column index 1 M1.append( 0, 2, -4 ); // Appending the element -4 in row 0 at column index 2 // ... \endcode // The most efficient way to fill a sparse matrix with elements, however, is a combination of // \c reserve(), \c append(), and the \c finalize() function: \code blaze::CompressedMatrix<int> M1( 3UL, 5UL ); M1.reserve( 3 ); // Reserving enough space for 3 non-zero elements M1.append( 0, 1, 1 ); // Appending the value 1 in row 0 with column index 1 M1.finalize( 0 ); // Finalizing row 0 M1.append( 1, 1, 2 ); // Appending the value 2 in row 1 with column index 1 M1.finalize( 1 ); // Finalizing row 1 M1.append( 2, 0, 3 ); // Appending the value 3 in row 2 with column index 0 M1.finalize( 2 ); // Finalizing row 2 \endcode // \n \section matrix_operations_member_functions Member Functions // <hr> // // \subsection matrix_operations_rows .rows() // // The current number of rows of a matrix can be acquired via the \c rows() member function: \code // Instantiating a dynamic matrix with 10 rows and 8 columns blaze::DynamicMatrix<int> M1( 10UL, 8UL ); M1.rows(); // Returns 10 // Instantiating a compressed matrix with 8 rows and 12 columns blaze::CompressedMatrix<double> M2( 8UL, 12UL ); M2.rows(); // Returns 8 \endcode // Alternatively, the 
free function \c rows() can be used to query the current number of rows of // a matrix. In contrast to the member function, the free function can also be used to query the // number of rows of a matrix expression: \code rows( M1 ); // Returns 10, i.e. has the same effect as the member function rows( M2 ); // Returns 8, i.e. has the same effect as the member function rows( M1 * M2 ); // Returns 10, i.e. the number of rows of the resulting matrix \endcode // \n \subsection matrix_operations_columns .columns() // // The current number of columns of a matrix can be acquired via the \c columns() member function: \code // Instantiating a dynamic matrix with 6 rows and 8 columns blaze::DynamicMatrix<int> M1( 6UL, 8UL ); M1.columns(); // Returns 8 // Instantiating a compressed matrix with 8 rows and 7 columns blaze::CompressedMatrix<double> M2( 8UL, 7UL ); M2.columns(); // Returns 7 \endcode // There is also a free function \c columns() available, which can also be used to query the number // of columns of a matrix expression: \code columns( M1 ); // Returns 8, i.e. has the same effect as the member function columns( M2 ); // Returns 7, i.e. has the same effect as the member function columns( M1 * M2 ); // Returns 7, i.e. the number of columns of the resulting matrix \endcode // \n \subsection matrix_operations_capacity .capacity() // // The \c capacity() member function returns the internal capacity of a dense or sparse matrix. // Note that the capacity of a matrix doesn't have to be equal to the size of a matrix. In case of // a dense matrix the capacity will always be greater than or equal to the total number of elements // of the matrix. In case of a sparse matrix, the capacity will usually be much less than the // total number of elements. 
\code blaze::DynamicMatrix<float> M1( 5UL, 7UL ); blaze::StaticMatrix<float,7UL,4UL> M2; M1.capacity(); // Returns at least 35 M2.capacity(); // Returns at least 28 \endcode // There is also a free function \c capacity() available to query the capacity. However, please // note that this function cannot be used to query the capacity of a matrix expression: \code capacity( M1 ); // Returns at least 35, i.e. has the same effect as the member function capacity( M2 ); // Returns at least 28, i.e. has the same effect as the member function capacity( M1 * M2 ); // Compilation error! \endcode // \n \subsection matrix_operations_nonzeros .nonZeros() // // For both dense and sparse matrices the current number of non-zero elements can be queried // via the \c nonZeros() member function. In case of matrices there are two flavors of the // \c nonZeros() function: One returns the total number of non-zero elements in the matrix, // the second returns the number of non-zero elements in a specific row (in case of a row-major // matrix) or column (in case of a column-major matrix). Sparse matrices directly return their // number of non-zero elements, dense matrices traverse their elements and count the number of // non-zero elements. \code blaze::DynamicMatrix<int,rowMajor> M1( 3UL, 5UL ); // ... Initializing the dense matrix M1.nonZeros(); // Returns the total number of non-zero elements in the dense matrix M1.nonZeros( 2 ); // Returns the number of non-zero elements in row 2 \endcode \code blaze::CompressedMatrix<double,columnMajor> M2( 4UL, 7UL ); // ... Initializing the sparse matrix M2.nonZeros(); // Returns the total number of non-zero elements in the sparse matrix M2.nonZeros( 3 ); // Returns the number of non-zero elements in column 3 \endcode // The free \c nonZeros() function can also be used to query the number of non-zero elements in a // matrix expression. 
However, the result is not the exact number of non-zero elements, but may be // a rough estimation: \code nonZeros( M1 ); // Has the same effect as the member function nonZeros( M1, 2 ); // Has the same effect as the member function nonZeros( M2 ); // Has the same effect as the member function nonZeros( M2, 3 ); // Has the same effect as the member function nonZeros( M1 * M2 ); // Estimates the number of non-zero elements in the matrix expression \endcode // \n \subsection matrix_operations_resize_reserve .resize() / .reserve() // // The dimensions of a \c StaticMatrix are fixed at compile time by the second and third template // parameter and a \c CustomMatrix cannot be resized. In contrast, the number of rows and columns // of \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix can be changed at runtime: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; DynamicMatrix<int,rowMajor> M1; CompressedMatrix<int,columnMajor> M2( 3UL, 2UL ); // Adapting the number of rows and columns via the resize() function. The (optional) // third parameter specifies whether the existing elements should be preserved. M1.resize( 2UL, 2UL ); // Resizing matrix M1 to 2x2 elements. Elements of built-in type // remain uninitialized, elements of class type are default // constructed. M1.resize( 3UL, 1UL, false ); // Resizing M1 to 3x1 elements. The old elements are lost, the // new elements are NOT initialized! M2.resize( 5UL, 7UL, true ); // Resizing M2 to 5x7 elements. The old elements are preserved. M2.resize( 3UL, 2UL, false ); // Resizing M2 to 3x2 elements. The old elements are lost. \endcode // Note that resizing a matrix invalidates all existing views (see e.g. 
\ref views_submatrices) // on the matrix: \code typedef blaze::DynamicMatrix<int,rowMajor> MatrixType; typedef blaze::Row<MatrixType> RowType; MatrixType M1( 10UL, 20UL ); // Creating a 10x20 matrix RowType row8 = row( M1, 8UL ); // Creating a view on the 8th row of the matrix M1.resize( 6UL, 20UL ); // Resizing the matrix invalidates the view \endcode // When the internal capacity of a matrix is no longer sufficient, the allocation of a larger // chunk of memory is triggered. In order to avoid frequent reallocations, the \c reserve() // function can be used up front to set the internal capacity: \code blaze::DynamicMatrix<int> M1; M1.reserve( 100 ); M1.rows(); // Returns 0 M1.capacity(); // Returns at least 100 \endcode // Additionally it is possible to reserve memory in a specific row (for a row-major matrix) or // column (for a column-major matrix): \code blaze::CompressedMatrix<int> M1( 4UL, 6UL ); M1.reserve( 1, 4 ); // Reserving enough space for four non-zero elements in row 1 \endcode // \n \section matrix_operations_free_functions Free Functions // <hr> // // \subsection matrix_operations_reset_clear reset() / clear() // // In order to reset all elements of a dense or sparse matrix, the \c reset() function can be // used. The number of rows and columns of the matrix are preserved: \code // Setting up a single precision row-major matrix, whose elements are initialized with 2.0F. blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F ); // Resetting all elements to 0.0F. 
reset( M1 ); // Resetting all elements M1.rows(); // Returns 4: size and capacity remain unchanged \endcode // Alternatively, only a single row or column of the matrix can be reset: \code blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 6UL, 5 ); // Setup of a row-major matrix blaze::DynamicMatrix<int,blaze::columnMajor> M2( 4UL, 5UL, 4 ); // Setup of a column-major matrix reset( M1, 2UL ); // Resetting the 2nd row of the row-major matrix reset( M2, 3UL ); // Resetting the 3rd column of the column-major matrix \endcode // In order to reset a row of a column-major matrix or a column of a row-major matrix, use a // row or column view (see \ref views_rows and \ref views_columns). // // In order to return a matrix to its default state (i.e. the state of a default constructed // matrix), the \c clear() function can be used: \code // Setting up a single precision row-major matrix, whose elements are initialized with 2.0F. blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F ); // Resetting all elements to 0.0F. clear( M1 ); // Resetting the entire matrix M1.rows(); // Returns 0: size is reset, but capacity remains unchanged \endcode // \n \subsection matrix_operations_isnan isnan() // // The \c isnan() function provides the means to check a dense or sparse matrix for not-a-number // elements: \code blaze::DynamicMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isnan( A ) ) { ... } \endcode \code blaze::CompressedMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isnan( A ) ) { ... } \endcode // If at least one element of the matrix is not-a-number, the function returns \c true, otherwise // it returns \c false. Please note that this function only works for matrices with floating point // elements. The attempt to use it for a matrix with a non-floating point element type results in // a compile time error. 
// // // \n \subsection matrix_operations_isdefault isDefault() // // The \c isDefault() function returns whether the given dense or sparse matrix is in default state: \code blaze::HybridMatrix<int,5UL,4UL> A; // ... Resizing and initialization if( isDefault( A ) ) { ... } \endcode // A matrix is in default state if it appears to just have been default constructed. All resizable // matrices (\c HybridMatrix, \c DynamicMatrix, or \c CompressedMatrix) and \c CustomMatrix are in // default state if their size is equal to zero. A non-resizable matrix (\c StaticMatrix and all // submatrices) is in default state if all its elements are in default state. For instance, in case // the matrix is instantiated for a built-in integral or floating point data type, the function // returns \c true in case all matrix elements are 0 and \c false in case any matrix element is // not 0. // // // \n \subsection matrix_operations_isSquare isSquare() // // Whether a dense or sparse matrix is a square matrix (i.e. if the number of rows is equal to the // number of columns) can be checked via the \c isSquare() function: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization if( isSquare( A ) ) { ... } \endcode // \n \subsection matrix_operations_issymmetric isSymmetric() // // Via the \c isSymmetric() function it is possible to check whether a dense or sparse matrix // is symmetric: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isSymmetric( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be symmetric! // // // \n \subsection matrix_operations_isUniform isUniform() // // In order to check if all matrix elements are identical, the \c isUniform function can be used: \code blaze::DynamicMatrix<int> A; // ... Resizing and initialization if( isUniform( A ) ) { ... } \endcode // Note that in case of a sparse matrix the zero elements are also taken into account! 
// // // \n \subsection matrix_operations_islower isLower() // // Via the \c isLower() function it is possible to check whether a dense or sparse matrix is // lower triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be lower triangular! // // // \n \subsection matrix_operations_isunilower isUniLower() // // Via the \c isUniLower() function it is possible to check whether a dense or sparse matrix is // lower unitriangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUniLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be lower unitriangular! // // // \n \subsection matrix_operations_isstrictlylower isStrictlyLower() // // Via the \c isStrictlyLower() function it is possible to check whether a dense or sparse matrix // is strictly lower triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isStrictlyLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be strictly lower triangular! // // // \n \subsection matrix_operations_isUpper isUpper() // // Via the \c isUpper() function it is possible to check whether a dense or sparse matrix is // upper triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be upper triangular! // // // \n \subsection matrix_operations_isuniupper isUniUpper() // // Via the \c isUniUpper() function it is possible to check whether a dense or sparse matrix is // upper unitriangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUniUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be upper unitriangular! 
// // // \n \subsection matrix_operations_isstrictlyupper isStrictlyUpper() // // Via the \c isStrictlyUpper() function it is possible to check whether a dense or sparse matrix // is strictly upper triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isStrictlyUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be strictly upper triangular! // // // \n \subsection matrix_operations_isdiagonal isDiagonal() // // The \c isDiagonal() function checks if the given dense or sparse matrix is a diagonal matrix, // i.e. if it has only elements on its diagonal and if the non-diagonal elements are default // elements: \code blaze::CompressedMatrix<float> A; // ... Resizing and initialization if( isDiagonal( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be diagonal! // // // \n \subsection matrix_operations_isidentity isIdentity() // // The \c isIdentity() function checks if the given dense or sparse matrix is an identity matrix, // i.e. if all diagonal elements are 1 and all non-diagonal elements are 0: \code blaze::CompressedMatrix<float> A; // ... Resizing and initialization if( isIdentity( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be identity matrices! // // // \n \subsection matrix_operations_min_max min() / max() // // The \c min() and the \c max() functions return the smallest and largest element of the given // dense or sparse matrix, respectively: \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -5, 2, 7 }, { 4, 0, 1 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> B{ { -5, 2, -7 }, { -4, 0, -1 } }; min( A ); // Returns -5 min( B ); // Returns -7 max( A ); // Returns 7 max( B ); // Returns 2 \endcode // In case the matrix currently has 0 rows or 0 columns, both functions return 0. Additionally, in // case a given sparse matrix is not completely filled, the zero elements are taken into account. 
// For example: the following compressed matrix has only 2 non-zero elements. However, the minimum // of this matrix is 0: \code blaze::CompressedMatrix<int> C( 2UL, 3UL ); C(0,0) = 1; C(0,2) = 3; min( C ); // Returns 0 \endcode // Also note that the \c min() and \c max() functions can be used to compute the smallest and // largest element of a matrix expression: \code min( A + B + C ); // Returns -9, i.e. the smallest value of the resulting matrix max( A - B - C ); // Returns 11, i.e. the largest value of the resulting matrix \endcode // \n \subsection matrix_operators_abs abs() // // The \c abs() function can be used to compute the absolute values of each element of a matrix. // For instance, the following computation \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -1, 2, -3 }, { 4, -5, 6 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( abs( A ) ); \endcode // results in the matrix \f$ B = \left(\begin{array}{*{3}{c}} 1 & 2 & 3 \\ 4 & 5 & 6 \\ \end{array}\right)\f$ // \n \subsection matrix_operators_rounding_functions floor() / ceil() / trunc() / round() // // The \c floor(), \c ceil(), \c trunc(), and \c round() functions can be used to round down/up // each element of a matrix, respectively: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = floor( A ); // Rounding down each element of the matrix B = ceil ( A ); // Rounding up each element of the matrix B = trunc( A ); // Truncating each element of the matrix B = round( A ); // Rounding each element of the matrix \endcode // \n \subsection matrix_operators_conj conj() // // The \c conj() function can be applied on a dense or sparse matrix to compute the complex // conjugate of each element of the matrix: \code using blaze::StaticMatrix; typedef std::complex<double> cplx; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Computing the matrix of conjugate values // ( (1, 0) 
(-2, 1) ) // ( (1,-1) ( 0,-1) ) StaticMatrix<cplx,2UL,2UL> B; B = conj( A ); \endcode // Additionally, matrices can be conjugated in-place via the \c conjugate() function: \code blaze::DynamicMatrix<cplx> C( 5UL, 2UL ); conjugate( C ); // In-place conjugate operation. C = conj( C ); // Same as above \endcode // \n \subsection matrix_operators_real real() // // The \c real() function can be used on a dense or sparse matrix to extract the real part of // each element of the matrix: \code using blaze::StaticMatrix; typedef std::complex<double> cplx; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Extracting the real part of each matrix element // ( 1 -2 ) // ( 1 0 ) StaticMatrix<double,2UL,2UL> B; B = real( A ); \endcode // \n \subsection matrix_operators_imag imag() // // The \c imag() function can be used on a dense or sparse matrix to extract the imaginary part // of each element of the matrix: \code using blaze::StaticMatrix; typedef std::complex<double> cplx; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Extracting the imaginary part of each matrix element // ( 0 -1 ) // ( 1 1 ) StaticMatrix<double,2UL,2UL> B; B = imag( A ); \endcode // \n \subsection matrix_operators_sqrt sqrt() / invsqrt() // // Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a // matrix can be computed: \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; B = sqrt( A ); // Computes the square root of each element C = invsqrt( A ); // Computes the inverse square root of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! 
// // // \n \subsection matrix_operators_cbrt cbrt() / invcbrt() // // The \c cbrt() and \c invcbrt() functions can be used to compute the the (inverse) cubic root // of each element of a matrix: \code blaze::DynamicMatrix<double> A, B, C; B = cbrt( A ); // Computes the cubic root of each element C = invcbrt( A ); // Computes the inverse cubic root of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_clip clip() // // The \c clip() function can be used to restrict all elements of a matrix to a specific range: \code blaze::DynamicMatrix<double> A, B; B = clip( A, -1.0, 1.0 ); // Restrict all elements to the range [-1..1] \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_pow pow() // // The \c pow() function can be used to compute the exponential value of each element of a matrix: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = pow( A, 1.2 ); // Computes the exponential value of each element \endcode // \n \subsection matrix_operators_exp exp() // // \c exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of each element of a // matrix, respectively: \code blaze::HybridMatrix<double,3UL,3UL> A, B; B = exp( A ); // Computes the base e exponential of each element B = exp2( A ); // Computes the base 2 exponential of each element B = exp10( A ); // Computes the base 10 exponential of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! 
// // // \n \subsection matrix_operators_log log() / log2() / log10() // // The \c log(), \c log2() and \c log10() functions can be used to compute the natural, binary // and common logarithm of each element of a matrix: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = log( A ); // Computes the natural logarithm of each element B = log2( A ); // Computes the binary logarithm of each element B = log10( A ); // Computes the common logarithm of each element \endcode // \n \subsection matrix_operators_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan() // // The following trigonometric functions are available for both dense and sparse matrices: \code blaze::DynamicMatrix<double> A, B; B = sin( A ); // Computes the sine of each element of the matrix B = cos( A ); // Computes the cosine of each element of the matrix B = tan( A ); // Computes the tangent of each element of the matrix B = asin( A ); // Computes the inverse sine of each element of the matrix B = acos( A ); // Computes the inverse cosine of each element of the matrix B = atan( A ); // Computes the inverse tangent of each element of the matrix \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! 
// // // \n \subsection matrix_operators_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh() // // The following hyperbolic functions are available for both dense and sparse matrices: \code blaze::DynamicMatrix<double> A, B; B = sinh( A ); // Computes the hyperbolic sine of each element of the matrix B = cosh( A ); // Computes the hyperbolic cosine of each element of the matrix B = tanh( A ); // Computes the hyperbolic tangent of each element of the matrix B = asinh( A ); // Computes the inverse hyperbolic sine of each element of the matrix B = acosh( A ); // Computes the inverse hyperbolic cosine of each element of the matrix B = atanh( A ); // Computes the inverse hyperbolic tangent of each element of the matrix \endcode // \n \subsection matrix_operators_erf erf() / erfc() // // The \c erf() and \c erfc() functions compute the (complementary) error function of each // element of a matrix: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = erf( A ); // Computes the error function of each element B = erfc( A ); // Computes the complementary error function of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operations_foreach forEach() // // Via the \c forEach() function it is possible to execute custom operations on dense and sparse // matrices. For instance, the following example demonstrates a custom square root computation via // a lambda: \code blaze::DynamicMatrix<double> A, B; B = forEach( A, []( double d ) { return std::sqrt( d ); } ); \endcode // Although the computation can be parallelized it is not vectorized and thus cannot perform at // peak performance. However, it is also possible to create vectorized custom operations. See // \ref custom_operations for a detailed overview of the possibilities of custom operations. 
// // // \n \subsection matrix_operations_matrix_transpose trans() // // Matrices can be transposed via the \c trans() function. Row-major matrices are transposed into // a column-major matrix and vice versa: \code blaze::DynamicMatrix<int,rowMajor> M1( 5UL, 2UL ); blaze::CompressedMatrix<int,columnMajor> M2( 3UL, 7UL ); M1 = M2; // Assigning a column-major matrix to a row-major matrix M1 = trans( M2 ); // Assigning the transpose of M2 (i.e. a row-major matrix) to M1 M1 += trans( M2 ); // Addition assignment of two row-major matrices \endcode // Additionally, matrices can be transposed in-place via the \c transpose() function: \code blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL ); transpose( M ); // In-place transpose operation. M = trans( M ); // Same as above \endcode // Note however that the transpose operation fails if ... // // - ... the given matrix has a fixed size and is non-square; // - ... the given matrix is a triangular matrix; // - ... the given submatrix affects the restricted parts of a triangular matrix; // - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix. 
// // // \n \subsection matrix_operations_conjugate_transpose ctrans() // // The conjugate transpose of a dense or sparse matrix (also called adjoint matrix, Hermitian // conjugate, or transjugate) can be computed via the \c ctrans() function: \code blaze::DynamicMatrix< complex<float>, rowMajor > M1( 5UL, 2UL ); blaze::CompressedMatrix< complex<float>, columnMajor > M2( 2UL, 5UL ); M1 = ctrans( M2 ); // Compute the conjugate transpose matrix \endcode // Note that the \c ctrans() function has the same effect as manually applying the \c conj() and // \c trans() function in any order: \code M1 = trans( conj( M2 ) ); // Computing the conjugate transpose matrix M1 = conj( trans( M2 ) ); // Computing the conjugate transpose matrix \endcode // The \c ctranspose() function can be used to perform an in-place conjugate transpose operation: \code blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL ); ctranspose( M ); // In-place conjugate transpose operation. M = ctrans( M ); // Same as above \endcode // Note however that the conjugate transpose operation fails if ... // // - ... the given matrix has a fixed size and is non-square; // - ... the given matrix is a triangular matrix; // - ... the given submatrix affects the restricted parts of a triangular matrix; // - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix. // // // \n \subsection matrix_operations_matrix_determinant det() // // The determinant of a square dense matrix can be computed by means of the \c det() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization double d = det( A ); // Compute the determinant of A \endcode // In case the given dense matrix is not a square matrix, a \c std::invalid_argument exception is // thrown. // // \note The \c det() function can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. 
The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The function is depending on LAPACK kernels. Thus the function can only be used if the // fitting LAPACK library is available and linked to the executable. Otherwise a linker error // will be created. // // // \n \subsection matrix_operations_swap swap() // // Via the \c \c swap() function it is possible to completely swap the contents of two matrices // of the same type: \code blaze::DynamicMatrix<int,blaze::rowMajor> M1( 10UL, 15UL ); blaze::DynamicMatrix<int,blaze::rowMajor> M2( 20UL, 10UL ); swap( M1, M2 ); // Swapping the contents of M1 and M2 \endcode // \n \section matrix_operations_matrix_inversion Matrix Inversion // <hr> // // The inverse of a square dense matrix can be computed via the \c inv() function: \code blaze::DynamicMatrix<float,blaze::rowMajor> A, B; // ... Resizing and initialization B = inv( A ); // Compute the inverse of A \endcode // Alternatively, an in-place inversion of a dense matrix can be performed via the \c invert() // function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization invert( A ); // In-place matrix inversion \endcode // Both the \c inv() and the \c invert() functions will automatically select the most suited matrix // inversion algorithm depending on the size and type of the given matrix. For small matrices of // up to 6x6, both functions use manually optimized kernels for maximum performance. For matrices // larger than 6x6 the inversion is performed by means of the most suited matrix decomposition // method: In case of a general matrix the LU decomposition is used, for symmetric matrices the // LDLT decomposition is applied, for Hermitian matrices the LDLH decomposition is performed, and // for triangular matrices the inverse is computed via a forward or back substitution. 
// // In case the type of the matrix does not provide additional compile time information about its // structure (symmetric, lower, upper, diagonal, ...), the information can be provided manually // when calling the \c invert() function: \code using blaze::asGeneral; using blaze::asSymmetric; using blaze::asHermitian; using blaze::asLower; using blaze::asUniLower; using blaze::asUpper; using blaze::asUniUpper; using blaze::asDiagonal; invert<asGeneral> ( A ); // In-place inversion of a general matrix invert<asSymmetric>( A ); // In-place inversion of a symmetric matrix invert<asHermitian>( A ); // In-place inversion of a Hermitian matrix invert<asLower> ( A ); // In-place inversion of a lower triangular matrix invert<asUniLower> ( A ); // In-place inversion of a lower unitriangular matrix invert<asUpper> ( A ); // In-place inversion of a upper triangular matrix invert<asUniUpper> ( A ); // In-place inversion of a upper unitriangular matrix invert<asDiagonal> ( A ); // In-place inversion of a diagonal matrix \endcode // Alternatively, via the \c invert() function it is possible to explicitly specify the inversion // algorithm: \code using blaze::byLU; using blaze::byLDLT; using blaze::byLDLH; using blaze::byLLH; // In-place inversion of a general matrix by means of an LU decomposition invert<byLU>( A ); // In-place inversion of a symmetric indefinite matrix by means of a Bunch-Kaufman decomposition invert<byLDLT>( A ); // In-place inversion of a Hermitian indefinite matrix by means of a Bunch-Kaufman decomposition invert<byLDLH>( A ); // In-place inversion of a positive definite matrix by means of a Cholesky decomposition invert<byLLH>( A ); \endcode // Whereas the inversion by means of an LU decomposition works for every general square matrix, // the inversion by LDLT only works for symmetric indefinite matrices, the inversion by LDLH is // restricted to Hermitian indefinite matrices and the Cholesky decomposition (LLH) only works // for Hermitian positive definite 
matrices. Please note that it is in the responsibility of the // function caller to guarantee that the selected algorithm is suited for the given matrix. In // case this precondition is violated the result can be wrong and might not represent the inverse // of the given matrix! // // For both the \c inv() and \c invert() function the matrix inversion fails if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // In all failure cases either a compilation error is created if the failure can be predicted at // compile time or a \c std::invalid_argument exception is thrown. // // \note The matrix inversion can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions invert the dense matrix by means of LAPACK kernels. Thus the functions can // only be used if the fitting LAPACK library is available and linked to the executable. Otherwise // a linker error will be created. // // \note It is not possible to use any kind of view on the expression object returned by the // \c inv() function. Also, it is not possible to access individual elements via the function call // operator on the expression object: \code row( inv( A ), 2UL ); // Compilation error: Views cannot be used on an inv() expression! inv( A )(1,2); // Compilation error: It is not possible to access individual elements! \endcode // \note The inversion functions do not provide any exception safety guarantee, i.e. in case an // exception is thrown the matrix may already have been modified. // // // \n \section matrix_operations_decomposition Matrix Decomposition // <hr> // // \note All decomposition functions can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. 
The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions decompose a dense matrix by means of LAPACK kernels. Thus the functions can // only be used if the fitting LAPACK library is available and linked to the executable. Otherwise // a linker error will be created. // // \subsection matrix_operations_decomposition_lu LU Decomposition // // The LU decomposition of a dense matrix can be computed via the \c lu() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L, U, P; lu( A, L, U, P ); // LU decomposition of a row-major matrix assert( A == L * U * P ); \endcode \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::columnMajor> L, U, P; lu( A, L, U, P ); // LU decomposition of a column-major matrix assert( A == P * L * U ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices. Note, however, that the // three matrices \c A, \c L and \c U are required to have the same storage order. Also, please // note that the way the permutation matrix \c P needs to be applied differs between row-major and // column-major matrices, since the algorithm uses column interchanges for row-major matrices and // row interchanges for column-major matrices. // // Furthermore, \c lu() can be used with adaptors. For instance, the following example demonstrates // the LU decomposition of a symmetric matrix into a lower and upper triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... 
Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > U; blaze::DynamicMatrix<double,blaze::columnMajor> P; lu( A, L, U, P ); // LU decomposition of A \endcode // \n \subsection matrix_operations_decomposition_llh Cholesky Decomposition // // The Cholesky (LLH) decomposition of a dense matrix can be computed via the \c llh() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L; llh( A, L ); // LLH decomposition of a row-major matrix assert( A == L * ctrans( L ) ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the two matrices \c A // and \c L can have any storage order. // // Furthermore, \c llh() can be used with adaptors. For instance, the following example demonstrates // the LLH decomposition of a symmetric matrix into a lower triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; llh( A, L ); // Cholesky decomposition of A \endcode // \n \subsection matrix_operations_decomposition_qr QR Decomposition // // The QR decomposition of a dense matrix can be computed via the \c qr() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::columnMajor> Q; blaze::DynamicMatrix<double,blaze::rowMajor> R; qr( A, Q, R ); // QR decomposition of a row-major matrix assert( A == Q * R ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c Q and \c R can have any storage order. // // Furthermore, \c qr() can be used with adaptors. 
For instance, the following example demonstrates // the QR decomposition of a symmetric matrix into a general matrix and an upper triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > R; qr( A, Q, R ); // QR decomposition of A \endcode // \n \subsection matrix_operations_decomposition_rq RQ Decomposition // // Similar to the QR decomposition, the RQ decomposition of a dense matrix can be computed via // the \c rq() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> R; blaze::DynamicMatrix<double,blaze::columnMajor> Q; rq( A, R, Q ); // RQ decomposition of a row-major matrix assert( A == R * Q ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c R and \c Q can have any storage order. // // Also the \c rq() function can be used in combination with matrix adaptors. For instance, the // following example demonstrates the RQ decomposition of an Hermitian matrix into a general // matrix and an upper triangular matrix: \code blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A; // ... Resizing and initialization blaze::UpperMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > R; blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q; rq( A, R, Q ); // RQ decomposition of A \endcode // \n \subsection matrix_operations_decomposition_ql QL Decomposition // // The QL decomposition of a dense matrix can be computed via the \c ql() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::DynamicMatrix<double,blaze::columnMajor> L; ql( A, Q, L ); // QL decomposition of a row-major matrix assert( A == Q * L ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c Q and \c L can have any storage order. // // Also the \c ql() function can be used in combination with matrix adaptors. For instance, the // following example demonstrates the QL decomposition of a symmetric matrix into a general // matrix and a lower triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; ql( A, Q, L ); // QL decomposition of A \endcode // \n \subsection matrix_operations_decomposition_lq LQ Decomposition // // The LQ decomposition of a dense matrix can be computed via the \c lq() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L; blaze::DynamicMatrix<double,blaze::columnMajor> Q; lq( A, L, Q ); // LQ decomposition of a row-major matrix assert( A == L * Q ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c L and \c Q can have any storage order. // // Furthermore, \c lq() can be used with adaptors. For instance, the following example demonstrates // the LQ decomposition of an Hermitian matrix into a lower triangular matrix and a general matrix: \code blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A; // ... 
Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > L; blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q; lq( A, L, Q ); // LQ decomposition of A \endcode // \n Previous: \ref matrix_types &nbsp; &nbsp; Next: \ref adaptors */ //************************************************************************************************* //**Adaptors*************************************************************************************** /*!\page adaptors Adaptors // // \tableofcontents // // // \section adaptors_general General Concepts // <hr> // // Adaptors act as wrappers around the general \ref matrix_types. They adapt the interface of the // matrices such that certain invariants are preserved. Due to this adaptors can provide a compile // time guarantee of certain properties, which can be exploited for optimized performance. // // The \b Blaze library provides a total of 9 different adaptors: // // <ul> // <li> \ref adaptors_symmetric_matrices </li> // <li> \ref adaptors_hermitian_matrices </li> // <li> \ref adaptors_triangular_matrices // <ul> // <li> \ref adaptors_triangular_matrices "Lower Triangular Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_lowermatrix </li> // <li> \ref adaptors_triangular_matrices_unilowermatrix </li> // <li> \ref adaptors_triangular_matrices_strictlylowermatrix </li> // </ul> // </li> // <li> \ref adaptors_triangular_matrices "Upper Triangular Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_uppermatrix </li> // <li> \ref adaptors_triangular_matrices_uniuppermatrix </li> // <li> \ref adaptors_triangular_matrices_strictlyuppermatrix </li> // </ul> // </li> // <li> \ref adaptors_triangular_matrices "Diagonal Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_diagonalmatrix </li> // </ul> // </li> // </ul> // </li> // </ul> // // In combination with the general matrix types, \b Blaze provides a total of 40 different matrix // types that make it possible to 
exactly adapt the type of matrix to every specific problem. // // // \n \section adaptors_examples Examples // <hr> // // The following code examples give an impression on the use of adaptors. The first example shows // the multiplication between two lower matrices: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; using blaze::columnMajor; LowerMatrix< DynamicMatrix<double,rowMajor> > A; LowerMatrix< DynamicMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the // fact that either the lower or upper part of the matrix contains only default elements and // restrict the algorithm to the non-zero elements. Thus the adaptor provides a significant // performance advantage in comparison to a general matrix multiplication, especially for large // matrices. // // The second example shows the \c SymmetricMatrix adaptor in a row-major dense matrix/sparse // vector multiplication: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which significantly increases the performance. 
// // \n Previous: \ref matrix_operations &nbsp; &nbsp; Next: \ref adaptors_symmetric_matrices */ //************************************************************************************************* //**Symmetric Matrices***************************************************************************** /*!\page adaptors_symmetric_matrices Symmetric Matrices // // \tableofcontents // // // \n \section adaptors_symmetric_matrices_general Symmetric Matrices // <hr> // // In contrast to general matrices, which have no restriction in their number of rows and columns // and whose elements can have any value, symmetric matrices provide the compile time guarantee // to be square matrices with pair-wise identical values. Mathematically, this means that a // symmetric matrix is always equal to its transpose (\f$ A = A^T \f$) and that all non-diagonal // values have an identical counterpart (\f$ a_{ij} == a_{ji} \f$). This symmetry property can // be exploited to provide higher efficiency and/or lower memory consumption. Within the \b Blaze // library, symmetric matrices are realized by the \ref adaptors_symmetric_matrices_symmetricmatrix // class template. // // // \n \section adaptors_symmetric_matrices_symmetricmatrix SymmetricMatrix // <hr> // // The SymmetricMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it // by enforcing the additional invariant of symmetry (i.e. the matrix is always equal to its // transpose \f$ A = A^T \f$). It can be included via the header file \code #include <blaze/math/SymmetricMatrix.h> \endcode // The type of the adapted matrix can be specified via template parameter: \code template< typename MT > class SymmetricMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. SymmetricMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. 
Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). // // The following examples give an impression of several possible symmetric matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense symmetric matrix with static memory blaze::SymmetricMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense symmetric matrix based on HybridMatrix blaze::SymmetricMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense symmetric matrix based on DynamicMatrix blaze::SymmetricMatrix< blaze::DynamicMatrix<double,rowMajor> > C; // Definition of a fixed size row-major dense symmetric matrix based on CustomMatrix blaze::SymmetricMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision symmetric matrix blaze::SymmetricMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > E; \endcode // The storage order of a symmetric matrix is depending on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as // blaze::rowMajor), the symmetric matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the symmetric matrix // will also be a column-major matrix. // // // \n \section adaptors_symmetric_matrices_special_properties Special Properties of Symmetric Matrices // <hr> // // A symmetric matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. // It also provides (nearly) the same interface as the underlying matrix type.
However, there are // some important exceptions resulting from the symmetry constraint: // // -# <b>\ref adaptors_symmetric_matrices_square</b> // -# <b>\ref adaptors_symmetric_matrices_symmetry</b> // -# <b>\ref adaptors_symmetric_matrices_initialization</b> // // \n \subsection adaptors_symmetric_matrices_square Symmetric Matrices Must Always be Square! // // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 symmetric dynamic matrix SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::SymmetricMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 symmetric static matrix SymmetricMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type SymmetricMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_symmetric_matrices_symmetry The Symmetric Property is Always Enforced! // // This means that modifying the element \f$ a_{ij} \f$ of a symmetric matrix also modifies its // counterpart element \f$ a_{ji} \f$. 
Also, it is only possible to assign matrices that are // symmetric themselves: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Default constructed, row-major 3x3 symmetric compressed matrix SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 ); // Initializing three elements via the function call operator A(0,0) = 1.0; // Initialization of the diagonal element (0,0) A(0,2) = 2.0; // Initialization of the elements (0,2) and (2,0) // Inserting three more elements via the insert() function A.insert( 1, 1, 3.0 ); // Inserting the diagonal element (1,1) A.insert( 1, 2, 4.0 ); // Inserting the elements (1,2) and (2,1) // Access via a non-const iterator *A.begin(1UL) = 10.0; // Modifies both elements (1,0) and (0,1) // Erasing elements via the erase() function A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0) // Construction from a symmetric dense matrix StaticMatrix<double,3UL,3UL> B{ { 3.0, 8.0, -2.0 }, { 8.0, 0.0, -1.0 }, { -2.0, -1.0, 4.0 } }; SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK // Assignment of a non-symmetric dense matrix StaticMatrix<double,3UL,3UL> D{ { 3.0, 7.0, -2.0 }, { 8.0, 0.0, -1.0 }, { -2.0, -1.0, 4.0 } }; C = D; // Throws an exception; symmetric invariant would be violated! \endcode // The same restriction also applies to the \c append() function for sparse matrices: Appending // the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix. // Despite the additional insertion, the \c append() function still provides the most efficient // way to set up a symmetric sparse matrix. 
In order to achieve the maximum efficiency, the // capacity of the individual rows/columns of the matrix should be specifically prepared with // \c reserve() calls: \code using blaze::CompressedMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Setup of the symmetric matrix // // ( 0 1 3 ) // A = ( 1 2 0 ) // ( 3 0 0 ) // SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 ); A.reserve( 5 ); // Reserving enough space for 5 non-zero elements A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row A.append( 0, 1, 1.0 ); // Appending the value 1 at position (0,1) and (1,0) A.append( 1, 1, 2.0 ); // Appending the value 2 at position (1,1) A.append( 2, 0, 3.0 ); // Appending the value 3 at position (2,0) and (0,2) \endcode // The symmetry property is also enforced for symmetric custom matrices: In case the given array // of elements does not represent a symmetric matrix, a \c std::invalid_argument exception is // thrown: \code using blaze::CustomMatrix; using blaze::SymmetricMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; typedef SymmetricMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> > CustomSymmetric; // Creating a 3x3 symmetric custom matrix from a properly initialized array double array[9] = { 1.0, 2.0, 4.0, 2.0, 3.0, 5.0, 4.0, 5.0, 6.0 }; CustomSymmetric A( array, 3UL ); // OK // Attempt to create a second 3x3 symmetric custom matrix from an uninitialized array CustomSymmetric B( new double[9UL], 3UL, blaze::ArrayDelete() ); // Throws an exception \endcode // Finally, the symmetry property is enforced for views (rows, columns, submatrices, ...) on the // symmetric matrix.
The following example demonstrates that modifying the elements of an entire // row of the symmetric matrix also affects the counterpart elements in the according column of // the matrix: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of the symmetric matrix // // ( 0 1 0 2 ) // A = ( 1 3 4 0 ) // ( 0 4 0 5 ) // ( 2 0 5 0 ) // SymmetricMatrix< DynamicMatrix<int> > A( 4 ); A(0,1) = 1; A(0,3) = 2; A(1,1) = 3; A(1,2) = 4; A(2,3) = 5; // Setting all elements in the 1st row to 0 results in the matrix // // ( 0 0 0 2 ) // A = ( 0 0 0 0 ) // ( 0 0 0 5 ) // ( 2 0 5 0 ) // row( A, 1 ) = 0; \endcode // The next example demonstrates the (compound) assignment to submatrices of symmetric matrices. // Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry // of the symmetric matrix is preserved. Otherwise a \c std::invalid_argument exception is // thrown: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of two default 4x4 symmetric matrices SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( 1 2 ) // B = ( 3 4 ) // ( 5 6 ) // DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; // OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved // // ( 0 0 1 2 ) // A1 = ( 0 0 3 4 ) // ( 1 3 5 6 ) // ( 2 4 6 0 ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved! // The elements marked with X cannot be assigned unambiguously! // // ( 0 1 2 0 ) // A2 = ( 1 3 X 0 ) // ( 2 X 6 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_symmetric_matrices_initialization The Elements of a Dense Symmetric Matrix are Always Default Initialized! 
// // Although this results in a small loss of efficiency (especially in case all default values are // overridden afterwards), this property is important since otherwise the symmetric property of // dense symmetric matrices could not be guaranteed: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // Default initialized, 5x5 row-major symmetric dynamic matrix SymmetricMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); \endcode // \n \section adaptors_symmetric_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A SymmetricMatrix matrix can participate in numerical operations in any way any other dense // or sparse matrix can participate. It can also be combined with any other dense or sparse vector // or matrix. The following code example gives an impression of the use of SymmetricMatrix within // arithmetic operations: \code using blaze::SymmetricMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; CompressedMatrix<float> E( 3, 3 ); // Empty row-major sparse single precision 3x3 matrix SymmetricMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > F; SymmetricMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > G; F = A + B; // Matrix addition and assignment to a row-major symmetric matrix G = A - C; // Matrix subtraction and assignment to a column-major symmetric matrix G = A * E; // Matrix multiplication between a dense and a sparse matrix A *= 2.0; // In-place scaling of matrix A F = 2.0 * B; // Scaling of matrix B G = E * 2.0; // Scaling of matrix E F += A - B; // Addition assignment G -= A + C; // Subtraction assignment G *= A * E; // Multiplication assignment \endcode // \n \section adaptors_symmetric_matrices_block_matrices Symmetric Block Matrices // <hr> // // It is also possible to use symmetric block matrices: \code using blaze::CompressedMatrix; using 
blaze::StaticMatrix; using blaze::SymmetricMatrix; // Definition of a 3x3 symmetric block matrix based on CompressedMatrix SymmetricMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > A( 3 ); \endcode // Also in this case, the SymmetricMatrix class template enforces the invariant of symmetry and // guarantees that a modifications of element \f$ a_{ij} \f$ of the adapted matrix is also // applied to element \f$ a_{ji} \f$: \code // Inserting the elements (2,4) and (4,2) A.insert( 2, 4, StaticMatrix<int,3UL,3UL>{ { 1, -4, 5 }, { 6, 8, -3 }, { 2, -1, 2 } } ); // Manipulating the elements (2,4) and (4,2) A(2,4)(1,1) = -5; \endcode // For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices. // // // \n \section adaptors_symmetric_matrices_performance Performance Considerations // <hr> // // When the symmetric property of a matrix is known beforehands using the SymmetricMatrix adaptor // instead of a general matrix can be a considerable performance advantage. The \b Blaze library // tries to exploit the properties of symmetric matrices whenever possible. However, there are // also situations when using a symmetric matrix introduces some overhead. The following examples // demonstrate several situations where symmetric matrices can positively or negatively impact // performance. // // \n \subsection adaptors_symmetric_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact // that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the // multiplication. 
The following example demonstrates this by means of a dense matrix/sparse matrix // multiplication: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; using blaze::columnMajor; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; SymmetricMatrix< CompressedMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited // for maximum performance. However, \b Blaze evaluates the multiplication as \code C = A * trans( B ); \endcode // which significantly increases the performance since in contrast to the original formulation the // optimized form can be vectorized. Therefore, in the context of matrix multiplications, using the // SymmetricMatrix adapter is obviously an advantage. // // \n \subsection adaptors_symmetric_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar optimization is possible in case of matrix/vector multiplications: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which also significantly increases the performance. 
// // \n \subsection adaptors_symmetric_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices // // Another example is the optimization of a row view on a column-major symmetric matrix: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::Row; using blaze::rowMajor; using blaze::columnMajor; typedef SymmetricMatrix< DynamicMatrix<double,columnMajor> > DynamicSymmetric; DynamicSymmetric A( 10UL ); Row<DynamicSymmetric> row5 = row( A, 5UL ); \endcode // Usually, a row view on a column-major matrix results in a considerable performance decrease in // comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix // elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of // the matrix, which provides the same performance as if the matrix would be row-major. Note that // this also works for column views on row-major matrices, where \b Blaze can use the according // row instead of a column in order to provide maximum performance. // // \n \subsection adaptors_symmetric_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a symmetric matrix on the right-hand side of an assignment (i.e. for read // access), which introduces absolutely no performance penalty, using a symmetric matrix on the // left-hand side of an assignment (i.e. 
for write access) may introduce additional overhead when // it is assigned a general matrix, which is not symmetric at compile time: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; SymmetricMatrix< DynamicMatrix<double> > A, C; DynamicMatrix<double> B; B = A; // Only read-access to the symmetric matrix; no performance penalty C = A; // Assignment of a symmetric matrix to another symmetric matrix; no runtime overhead C = B; // Assignment of a general matrix to a symmetric matrix; some runtime overhead \endcode // When assigning a general, potentially not symmetric matrix to a symmetric matrix it is necessary // to check whether the matrix is symmetric at runtime in order to guarantee the symmetry property // of the symmetric matrix. In case it turns out to be symmetric, it is assigned as efficiently as // possible, if it is not, an exception is thrown. In order to prevent this runtime overhead it is // therefore generally advisable to assign symmetric matrices to other symmetric matrices.\n // In this context it is especially noteworthy that in contrast to additions and subtractions the // multiplication of two symmetric matrices does not necessarily result in another symmetric matrix: \code SymmetricMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a symmetric matrix; no runtime overhead C = A - B; // Results in a symmetric matrix; no runtime overhead C = A * B; // Is not guaranteed to result in a symmetric matrix; some runtime overhead \endcode // \n Previous: \ref adaptors &nbsp; &nbsp; Next: \ref adaptors_hermitian_matrices */ //************************************************************************************************* //**Hermitian Matrices***************************************************************************** /*!\page adaptors_hermitian_matrices Hermitian Matrices // // \tableofcontents // // // \n \section adaptors_hermitian_matrices_general Hermitian Matrices // <hr> // // In addition to symmetric matrices, \b 
Blaze also provides an adaptor for Hermitian matrices. // Hermitian matrices provide the compile time guarantee to be square matrices with pair-wise // conjugate complex values. Mathematically, this means that an Hermitian matrix is always equal // to its conjugate transpose (\f$ A = \overline{A^T} \f$) and that all non-diagonal values have // a complex conjugate counterpart (\f$ a_{ij} == \overline{a_{ji}} \f$). Within the \b Blaze // library, Hermitian matrices are realized by the \ref adaptors_hermitian_matrices_hermitianmatrix // class template. // // // \n \section adaptors_hermitian_matrices_hermitianmatrix HermitianMatrix // <hr> // // The HermitianMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant of Hermitian symmetry (i.e. the matrix is always equal to // its conjugate transpose \f$ A = \overline{A^T} \f$). It can be included via the header file \code #include <blaze/math/HermitianMatrix.h> \endcode // The type of the adapted matrix can be specified via template parameter: \code template< typename MT > class HermitianMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. HermitianMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible Hermitian matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense Hermitian matrix with static memory blaze::HermitianMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense Hermitian matrix based on HybridMatrix blaze::HermitianMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense Hermitian matrix based on DynamicMatrix blaze::HermitianMatrix< blaze::DynamicMatrix<std::complex<double>,rowMajor> > C; // Definition of a fixed size row-major dense Hermitian matrix based on CustomMatrix blaze::HermitianMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision complex Hermitian matrix blaze::HermitianMatrix< blaze::CompressedMatrix<std::complex<float>,rowMajor> > E; \endcode // The storage order of a Hermitian matrix is depending on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as // blaze::rowMajor), the Hermitian matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the Hermitian matrix // will also be a column-major matrix. // // // \n \section adaptors_hermitian_matrices_vs_symmetric_matrices Hermitian Matrices vs. Symmetric Matrices // // The blaze::HermitianMatrix adaptor and the blaze::SymmetricMatrix adaptor share several traits. // However, there are a couple of differences, both from a mathematical point of view as well as // from an implementation point of view.
// // From a mathematical point of view, a matrix is called symmetric when it is equal to its // transpose (\f$ A = A^T \f$) and it is called Hermitian when it is equal to its conjugate // transpose (\f$ A = \overline{A^T} \f$). For matrices of real values, however, these two // conditions coincide, which means that symmetric matrices of real values are also Hermitian // and Hermitian matrices of real values are also symmetric. // // From an implementation point of view, \b Blaze restricts Hermitian matrices to numeric data // types (i.e. all integral types except \c bool, floating point and complex types), whereas // symmetric matrices can also be block matrices (i.e. can have vector or matrix elements). // For built-in element types, the HermitianMatrix adaptor behaves exactly like the according // SymmetricMatrix implementation. For complex element types, however, the Hermitian property // is enforced (see also \ref adaptors_hermitian_matrices_hermitian). \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::HermitianMatrix; using blaze::SymmetricMatrix; // The following two matrices provide an identical experience (including performance) HermitianMatrix< DynamicMatrix<double> > A; // Both Hermitian and symmetric SymmetricMatrix< DynamicMatrix<double> > B; // Both Hermitian and symmetric // The following two matrices will behave differently HermitianMatrix< DynamicMatrix< complex<double> > > C; // Only Hermitian SymmetricMatrix< DynamicMatrix< complex<double> > > D; // Only symmetric // Hermitian block matrices are not allowed HermitianMatrix< DynamicMatrix< DynamicVector<double> > > E; // Compilation error! SymmetricMatrix< DynamicMatrix< DynamicVector<double> > > F; // Symmetric block matrix \endcode // \n \section adaptors_hermitian_matrices_special_properties Special Properties of Hermitian Matrices // <hr> // // A Hermitian matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. 
// It also provides (nearly) the same interface as the underlying matrix type. However, there are // some important exceptions resulting from the Hermitian symmetry constraint: // // -# <b>\ref adaptors_hermitian_matrices_square</b> // -# <b>\ref adaptors_hermitian_matrices_hermitian</b> // -# <b>\ref adaptors_hermitian_matrices_initialization</b> // // \n \subsection adaptors_hermitian_matrices_square Hermitian Matrices Must Always be Square! // // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 Hermitian dynamic matrix HermitianMatrix< DynamicMatrix<std::complex<double>,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::HermitianMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 Hermitian static matrix HermitianMatrix< StaticMatrix<std::complex<float>,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type HermitianMatrix< StaticMatrix<std::complex<float>,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_hermitian_matrices_hermitian The Hermitian Property is Always Enforced! 
// // This means that the following properties of a Hermitian matrix are always guaranteed: // // - The diagonal elements are real numbers, i.e. the imaginary part is zero // - Element \f$ a_{ij} \f$ is always the complex conjugate of element \f$ a_{ji} \f$ // // Thus modifying the element \f$ a_{ij} \f$ of a Hermitian matrix also modifies its // counterpart element \f$ a_{ji} \f$. Also, it is only possible to assign matrices that // are Hermitian themselves: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; typedef std::complex<double> cplx; // Default constructed, row-major 3x3 Hermitian compressed matrix HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 ); // Initializing the matrix via the function call operator // // ( (1, 0) (0,0) (2,1) ) // ( (0, 0) (0,0) (0,0) ) // ( (2,-1) (0,0) (0,0) ) // A(0,0) = cplx( 1.0, 0.0 ); // Initialization of the diagonal element (0,0) A(0,2) = cplx( 2.0, 1.0 ); // Initialization of the elements (0,2) and (2,0) // Inserting three more elements via the insert() function // // ( (1,-3) (0,0) (2, 1) ) // ( (0, 0) (2,0) (4,-2) ) // ( (2,-1) (4,2) (0, 0) ) // A.insert( 1, 1, cplx( 2.0, 0.0 ) ); // Inserting the diagonal element (1,1) A.insert( 1, 2, cplx( 4.0, -2.0 ) ); // Inserting the elements (1,2) and (2,1) // Access via a non-const iterator // // ( (1,-3) (8,1) (2, 1) ) // ( (8,-1) (2,0) (4,-2) ) // ( (2,-1) (4,2) (0, 0) ) // *A.begin(1UL) = cplx( 8.0, -1.0 ); // Modifies both elements (1,0) and (0,1) // Erasing elements via the erase() function // // ( (0, 0) (8,1) (0, 0) ) // ( (8,-1) (2,0) (4,-2) ) // ( (0, 0) (4,2) (0, 0) ) // A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0) // Construction from a Hermitian dense matrix StaticMatrix<cplx,3UL,3UL> B{ { cplx( 3.0, 0.0 ), cplx( 8.0, 2.0 ), cplx( -2.0, 2.0 ) }, { cplx( 8.0, 1.0 ), cplx( 0.0, 0.0 ), cplx( -1.0, -1.0 ) 
}, { cplx( -2.0, -2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } }; HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( B ); // OK // Assignment of a non-Hermitian dense matrix StaticMatrix<cplx,3UL,3UL> D{ { cplx( 3.0, 0.0 ), cplx( 7.0, 2.0 ), cplx( 3.0, 2.0 ) }, { cplx( 8.0, 1.0 ), cplx( 0.0, 0.0 ), cplx( 6.0, 4.0 ) }, { cplx( -2.0, 2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } }; C = D; // Throws an exception; Hermitian invariant would be violated! \endcode // The same restriction also applies to the \c append() function for sparse matrices: Appending // the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix. // Despite the additional insertion, the \c append() function still provides the most efficient // way to set up a Hermitian sparse matrix. In order to achieve the maximum efficiency, the // capacity of the individual rows/columns of the matrix should be specifically prepared with // \c reserve() calls: \code using blaze::CompressedMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; typedef std::complex<double> cplx; // Setup of the Hermitian matrix // // ( (0, 0) (1,2) (3,-4) ) // A = ( (1,-2) (2,0) (0, 0) ) // ( (3, 4) (0,0) (0, 0) ) // HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 ); A.reserve( 5 ); // Reserving enough space for 5 non-zero elements A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row A.append( 0, 1, cplx( 1.0, 2.0 ) ); // Appending an element at position (0,1) and (1,0) A.append( 1, 1, cplx( 2.0, 0.0 ) ); // Appending an element at position (1,1) A.append( 2, 0, cplx( 3.0, 4.0 ) ); // Appending an element at position (2,0) and (0,2) \endcode // The Hermitian property is also enforced for Hermitian custom matrices: In case the given array // of elements does not represent a Hermitian matrix, a \c std::invalid_argument
exception is // thrown: \code using blaze::CustomMatrix; using blaze::HermitianMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; typedef HermitianMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> > CustomHermitian; // Creating a 3x3 Hermitian custom matrix from a properly initialized array double array[9] = { 1.0, 2.0, 4.0, 2.0, 3.0, 5.0, 4.0, 5.0, 6.0 }; CustomHermitian A( array, 3UL ); // OK // Attempt to create a second 3x3 Hermitian custom matrix from an uninitialized array CustomHermitian B( new double[9UL], 3UL, blaze::ArrayDelete() ); // Throws an exception \endcode // Finally, the Hermitian property is enforced for views (rows, columns, submatrices, ...) on the // Hermitian matrix. The following example demonstrates that modifying the elements of an entire // row of the Hermitian matrix also affects the counterpart elements in the according column of // the matrix: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; typedef std::complex<double> cplx; // Setup of the Hermitian matrix // // ( (0, 0) (1,-1) (0,0) (2, 1) ) // A = ( (1, 1) (3, 0) (4,2) (0, 0) ) // ( (0, 0) (4,-2) (0,0) (5,-3) ) // ( (2,-1) (0, 0) (5,3) (0, 0) ) // HermitianMatrix< DynamicMatrix<cplx> > A( 4 ); A(0,1) = cplx( 1.0, -1.0 ); A(0,3) = cplx( 2.0, 1.0 ); A(1,1) = cplx( 3.0, 0.0 ); A(1,2) = cplx( 4.0, 2.0 ); A(2,3) = cplx( 5.0, 3.0 ); // Setting all elements in the 1st row to 0 results in the matrix // // ( (0, 0) (0,0) (0,0) (2, 1) ) // A = ( (0, 0) (0,0) (0,0) (0, 0) ) // ( (0, 0) (0,0) (0,0) (5,-3) ) // ( (2,-1) (0,0) (5,3) (0, 0) ) // row( A, 1 ) = cplx( 0.0, 0.0 ); \endcode // The next example demonstrates the (compound) assignment to submatrices of Hermitian matrices. // Since the modification of element \f$ a_{ij} \f$ of a Hermitian matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the Hermitian // symmetry of the matrix is preserved.
Otherwise a \c std::invalid_argument exception is thrown: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; typedef std::complex<double> cplx; // Setup of two default 4x4 Hermitian matrices HermitianMatrix< DynamicMatrix<cplx> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( (1,-1) (2, 5) ) // B = ( (3, 0) (4,-6) ) // ( (5, 0) (6, 0) ) // DynamicMatrix<cplx> B( 3UL, 2UL ); B(0,0) = cplx( 1.0, -1.0 ); B(0,1) = cplx( 2.0, 5.0 ); B(1,0) = cplx( 3.0, 0.0 ); B(1,1) = cplx( 4.0, -6.0 ); B(2,0) = cplx( 5.0, 0.0 ); B(2,1) = cplx( 6.0, 0.0 ); // OK: Assigning B to a submatrix of A1 such that the Hermitian property is preserved // // ( (0, 0) (0, 0) (1,-1) (2, 5) ) // A1 = ( (0, 0) (0, 0) (3, 0) (4,-6) ) // ( (1, 1) (3, 0) (5, 0) (6, 0) ) // ( (2,-5) (4, 6) (6, 0) (0, 0) ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the Hermitian property isn't preserved! // The elements marked with X cannot be assigned unambiguously! // // ( (0, 0) (1,-1) (2,5) (0,0) ) // A2 = ( (1, 1) (3, 0) (X,X) (0,0) ) // ( (2,-5) (X, X) (6,0) (0,0) ) // ( (0, 0) (0, 0) (0,0) (0,0) ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_hermitian_matrices_initialization The Elements of a Dense Hermitian Matrix are Always Default Initialized!
// // Although this results in a small loss of efficiency (especially in case all default values are // overridden afterwards), this property is important since otherwise the Hermitian property of // dense Hermitian matrices could not be guaranteed: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // Default initialized, 5x5 row-major Hermitian dynamic matrix HermitianMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); \endcode // \n \section adaptors_hermitian_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A HermitianMatrix can be used within all numerical operations in any way any other dense or // sparse matrix can be used. It can also be combined with any other dense or sparse vector or // matrix. The following code example gives an impression of the use of HermitianMatrix within // arithmetic operations: \code using blaze::HermitianMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; typedef complex<float> cplx; DynamicMatrix<cplx,rowMajor> A( 3, 3 ); CompressedMatrix<cplx,rowMajor> B( 3, 3 ); HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( 3 ); HermitianMatrix< CompressedMatrix<cplx,rowMajor> > D( 3 ); HermitianMatrix< HybridMatrix<cplx,3UL,3UL,rowMajor> > E; HermitianMatrix< StaticMatrix<cplx,3UL,3UL,columnMajor> > F; E = A + B; // Matrix addition and assignment to a row-major Hermitian matrix F = C - D; // Matrix subtraction and assignment to a column-major Hermitian matrix F = A * D; // Matrix multiplication between a dense and a sparse matrix C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of matrix B F = C * 2.0; // Scaling of matrix C E += A - B; // Addition assignment F -= C + D; // Subtraction assignment F *= A * D; // Multiplication assignment \endcode // \n \section 
adaptors_hermitian_matrices_performance Performance Considerations // <hr> // // When the Hermitian property of a matrix is known beforehand, using the HermitianMatrix adaptor // instead of a general matrix can be a considerable performance advantage. This is particularly // true in case the Hermitian matrix is also symmetric (i.e. has built-in element types). The // \b Blaze library tries to exploit the properties of Hermitian (symmetric) matrices whenever // possible. However, there are also situations when using a Hermitian matrix introduces some // overhead. The following examples demonstrate several situations where Hermitian matrices can // positively or negatively impact performance. // // \n \subsection adaptors_hermitian_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact // that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the // multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix // multiplication: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; using blaze::columnMajor; HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Both Hermitian and symmetric HermitianMatrix< CompressedMatrix<double,columnMajor> > B; // Both Hermitian and symmetric DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited // for maximum performance. However, \b Blaze evaluates the multiplication as \code C = A * trans( B ); \endcode // which significantly increases the performance since in contrast to the original formulation the // optimized form can be vectorized. Therefore, in the context of matrix multiplications, using a // symmetric matrix is obviously an advantage. 
// // \n \subsection adaptors_hermitian_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar optimization is possible in case of matrix/vector multiplications: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::HermitianMatrix; using blaze::rowMajor; using blaze::columnVector; HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Hermitian and symmetric CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which also significantly increases the performance. // // \n \subsection adaptors_hermitian_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices // // Another example is the optimization of a row view on a column-major symmetric matrix: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::Row; using blaze::rowMajor; using blaze::columnMajor; typedef HermitianMatrix< DynamicMatrix<double,columnMajor> > DynamicHermitian; DynamicHermitian A( 10UL ); // Both Hermitian and symmetric Row<DynamicHermitian> row5 = row( A, 5UL ); \endcode // Usually, a row view on a column-major matrix results in a considerable performance decrease in // comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix // elements. 
However, in case of symmetric matrices, \b Blaze instead uses the according column of // the matrix, which provides the same performance as if the matrix would be row-major. Note that // this also works for column views on row-major matrices, where \b Blaze can use the according // row instead of a column in order to provide maximum performance. // // \n \subsection adaptors_hermitian_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a Hermitian matrix on the right-hand side of an assignment (i.e. for read // access), which introduces absolutely no performance penalty, using a Hermitian matrix on the // left-hand side of an assignment (i.e. for write access) may introduce additional overhead when // it is assigned a general matrix, which is not Hermitian at compile time: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; HermitianMatrix< DynamicMatrix< complex<double> > > A, C; DynamicMatrix<double> B; B = A; // Only read-access to the Hermitian matrix; no performance penalty C = A; // Assignment of a Hermitian matrix to another Hermitian matrix; no runtime overhead C = B; // Assignment of a general matrix to a Hermitian matrix; some runtime overhead \endcode // When assigning a general, potentially not Hermitian matrix to a Hermitian matrix it is necessary // to check whether the matrix is Hermitian at runtime in order to guarantee the Hermitian property // of the Hermitian matrix. In case it turns out to be Hermitian, it is assigned as efficiently as // possible, if it is not, an exception is thrown. 
In order to prevent this runtime overhead it is // therefore generally advisable to assign Hermitian matrices to other Hermitian matrices.\n // In this context it is especially noteworthy that in contrast to additions and subtractions the // multiplication of two Hermitian matrices does not necessarily result in another Hermitian matrix: \code HermitianMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a Hermitian matrix; no runtime overhead C = A - B; // Results in a Hermitian matrix; no runtime overhead C = A * B; // Is not guaranteed to result in a Hermitian matrix; some runtime overhead \endcode // \n Previous: \ref adaptors_symmetric_matrices &nbsp; &nbsp; Next: \ref adaptors_triangular_matrices */ //************************************************************************************************* //**Triangular Matrices**************************************************************************** /*!\page adaptors_triangular_matrices Triangular Matrices // // \tableofcontents // // // \n \section adaptors_triangular_matrices_general Triangular Matrices // <hr> // // Triangular matrices come in three flavors: Lower triangular matrices provide the compile time // guarantee to be square matrices and that the upper part of the matrix contains only default // elements that cannot be modified. Upper triangular matrices on the other hand provide the // compile time guarantee to be square and that the lower part of the matrix contains only fixed // default elements. Finally, diagonal matrices provide the compile time guarantee to be square // and that both the lower and upper part of the matrix contain only immutable default elements. // These properties can be exploited to gain higher performance and/or to save memory. 
Within the // \b Blaze library, several kinds of lower and upper triangular and diagonal matrices are realized // by the following class templates: // // Lower triangular matrices: // - <b>\ref adaptors_triangular_matrices_lowermatrix</b> // - <b>\ref adaptors_triangular_matrices_unilowermatrix</b> // - <b>\ref adaptors_triangular_matrices_strictlylowermatrix</b> // // Upper triangular matrices: // - <b>\ref adaptors_triangular_matrices_uppermatrix</b> // - <b>\ref adaptors_triangular_matrices_uniuppermatrix</b> // - <b>\ref adaptors_triangular_matrices_strictlyuppermatrix</b> // // Diagonal matrices // - <b>\ref adaptors_triangular_matrices_diagonalmatrix</b> // // // \n \section adaptors_triangular_matrices_lowermatrix LowerMatrix // <hr> // // The blaze::LowerMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant that all matrix elements above the diagonal are 0 (lower // triangular matrix): \f[\left(\begin{array}{*{5}{c}} l_{0,0} & 0 & 0 & \cdots & 0 \\ l_{1,0} & l_{1,1} & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & l_{2,2} & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & l_{N,N} \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/LowerMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class LowerMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::LowerMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible lower matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense lower matrix with static memory blaze::LowerMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense lower matrix based on HybridMatrix blaze::LowerMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense lower matrix based on DynamicMatrix blaze::LowerMatrix< blaze::DynamicMatrix<double,rowMajor> > C; // Definition of a fixed size row-major dense lower matrix based on CustomMatrix blaze::LowerMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision lower matrix blaze::LowerMatrix< blaze::CompressedMatrix<float,rowMajor> > E; \endcode // The storage order of a lower matrix is depending on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified // as blaze::rowMajor), the lower matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the lower matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_unilowermatrix UniLowerMatrix // <hr> // // The blaze::UniLowerMatrix class template is an adapter for existing dense and sparse matrix // types. 
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix // elements above the diagonal are 0 (lower unitriangular matrix): \f[\left(\begin{array}{*{5}{c}} 1 & 0 & 0 & \cdots & 0 \\ l_{1,0} & 1 & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & 1 & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 1 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/UniLowerMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class UniLowerMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UniLowerMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible lower unitriangular matrices: \code // Definition of a 3x3 row-major dense unilower matrix with static memory blaze::UniLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense unilower matrix based on HybridMatrix blaze::UniLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense unilower matrix based on DynamicMatrix blaze::UniLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision unilower matrix blaze::UniLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a lower unitriangular matrix is depending on the storage order of the // adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the unilower matrix will also be a row-major matrix. // Otherwise if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the unilower matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_strictlylowermatrix StrictlyLowerMatrix // <hr> // // The blaze::StrictlyLowerMatrix class template is an adapter for existing dense and sparse matrix // types. 
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements and all matrix // elements above the diagonal are 0 (strictly lower triangular matrix): \f[\left(\begin{array}{*{5}{c}} 0 & 0 & 0 & \cdots & 0 \\ l_{1,0} & 0 & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & 0 & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 0 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/StrictlyLowerMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class StrictlyLowerMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::StrictlyLowerMatrix can be used // with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix // type. Note that the given matrix type must be either resizable (as for instance // blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance // blaze::StaticMatrix). 
// // The following examples give an impression of several possible strictly lower triangular matrices: \code // Definition of a 3x3 row-major dense strictly lower matrix with static memory blaze::StrictlyLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense strictly lower matrix based on HybridMatrix blaze::StrictlyLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense strictly lower matrix based on DynamicMatrix blaze::StrictlyLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision strictly lower matrix blaze::StrictlyLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a strictly lower triangular matrix is depending on the storage order of // the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the strictly lower matrix will also be a row-major matrix. // Otherwise if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the strictly lower matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_uppermatrix UpperMatrix // <hr> // // The blaze::UpperMatrix class template is an adapter for existing dense and sparse matrix types. 
// It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant that all matrix elements below the diagonal are 0 (upper // triangular matrix): \f[\left(\begin{array}{*{5}{c}} u_{0,0} & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & u_{1,1} & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & u_{2,2} & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & u_{N,N} \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/UpperMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class UpperMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UpperMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). // // The following examples give an impression of several possible upper matrices: \code // Definition of a 3x3 row-major dense upper matrix with static memory blaze::UpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense upper matrix based on HybridMatrix blaze::UpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense upper matrix based on DynamicMatrix blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision upper matrix blaze::UpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of an upper matrix is depending on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. 
is specified // as blaze::rowMajor), the upper matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the upper matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_uniuppermatrix UniUpperMatrix // <hr> // // The blaze::UniUpperMatrix class template is an adapter for existing dense and sparse matrix // types. It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix // elements below the diagonal are 0 (upper unitriangular matrix): \f[\left(\begin{array}{*{5}{c}} 1 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & 1 & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & 1 & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & 1 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/UniUpperMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class UniUpperMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UniUpperMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible upper unitriangular matrices: \code // Definition of a 3x3 row-major dense uniupper matrix with static memory blaze::UniUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense uniupper matrix based on HybridMatrix blaze::UniUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense uniupper matrix based on DynamicMatrix blaze::UniUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision uniupper matrix blaze::UniUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of an upper unitriangular matrix is depending on the storage order of the // adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the uniupper matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the uniupper matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_strictlyuppermatrix StrictlyUpperMatrix // <hr> // // The blaze::StrictlyUpperMatrix class template is an adapter for existing dense and sparse matrix // types. 
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements and all matrix // elements below the diagonal are 0 (strictly upper triangular matrix): \f[\left(\begin{array}{*{5}{c}} 0 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & 0 & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & 0 & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & 0 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/StrictlyUpperMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class StrictlyUpperMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::StrictlyUpperMatrix can be used // with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix // type. Note that the given matrix type must be either resizable (as for instance // blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance // blaze::StaticMatrix). 
// // The following examples give an impression of several possible strictly upper triangular matrices: \code // Definition of a 3x3 row-major dense strictly upper matrix with static memory blaze::StrictlyUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense strictly upper matrix based on HybridMatrix blaze::StrictlyUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense strictly upper matrix based on DynamicMatrix blaze::StrictlyUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision strictly upper matrix blaze::StrictlyUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a strictly upper triangular matrix is depending on the storage order of // the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the strictly upper matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the strictly upper matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_diagonalmatrix DiagonalMatrix // <hr> // // The blaze::DiagonalMatrix class template is an adapter for existing dense and sparse matrix // types. 
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all matrix elements above and below the diagonal // are 0 (diagonal matrix): \f[\left(\begin{array}{*{5}{c}} l_{0,0} & 0 & 0 & \cdots & 0 \\ 0 & l_{1,1} & 0 & \cdots & 0 \\ 0 & 0 & l_{2,2} & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & l_{N,N} \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/DiagonalMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class DiagonalMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::DiagonalMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). // // The following examples give an impression of several possible diagonal matrices: \code // Definition of a 3x3 row-major dense diagonal matrix with static memory blaze::DiagonalMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense diagonal matrix based on HybridMatrix blaze::DiagonalMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense diagonal matrix based on DynamicMatrix blaze::DiagonalMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision diagonal matrix blaze::DiagonalMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a diagonal matrix is depending on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. 
is specified // as blaze::rowMajor), the diagonal matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the diagonal matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_special_properties Special Properties of Triangular Matrices // <hr> // // A triangular matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. // It also provides (nearly) the same interface as the underlying matrix type. However, there are // some important exceptions resulting from the triangular matrix constraint: // // -# <b>\ref adaptors_triangular_matrices_square</b> // -# <b>\ref adaptors_triangular_matrices_triangular</b> // -# <b>\ref adaptors_triangular_matrices_initialization</b> // -# <b>\ref adaptors_triangular_matrices_storage</b> // -# <b>\ref adaptors_triangular_matrices_scaling</b> // // \n \subsection adaptors_triangular_matrices_square Triangular Matrices Must Always be Square! 
// // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 lower dynamic matrix LowerMatrix< DynamicMatrix<double,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 lower static matrix LowerMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type LowerMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_triangular_matrices_triangular The Triangular Property is Always Enforced! // // This means that it is only allowed to modify elements in the lower part or the diagonal of // a lower triangular matrix and in the upper part or the diagonal of an upper triangular matrix. // Unitriangular and strictly triangular matrices are even more restrictive and don't allow the // modification of diagonal elements. Also, triangular matrices can only be assigned matrices that // don't violate their triangular property. The following example demonstrates this restriction // by means of the blaze::LowerMatrix adaptor. For examples with other triangular matrix types // see the according class documentations. 
\code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::rowMajor; typedef LowerMatrix< CompressedMatrix<double,rowMajor> > CompressedLower; // Default constructed, row-major 3x3 lower compressed matrix CompressedLower A( 3 ); // Initializing elements via the function call operator A(0,0) = 1.0; // Initialization of the diagonal element (0,0) A(2,0) = 2.0; // Initialization of the lower element (2,0) A(1,2) = 9.0; // Throws an exception; invalid modification of upper element // Inserting two more elements via the insert() function A.insert( 1, 0, 3.0 ); // Inserting the lower element (1,0) A.insert( 2, 1, 4.0 ); // Inserting the lower element (2,1) A.insert( 0, 2, 9.0 ); // Throws an exception; invalid insertion of upper element // Appending an element via the append() function A.reserve( 1, 3 ); // Reserving enough capacity in row 1 A.append( 1, 1, 5.0 ); // Appending the diagonal element (1,1) A.append( 1, 2, 9.0 ); // Throws an exception; appending an element in the upper part // Access via a non-const iterator CompressedLower::Iterator it = A.begin(1); *it = 6.0; // Modifies the lower element (1,0) ++it; *it = 9.0; // Modifies the diagonal element (1,1) // Erasing elements via the erase() function A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 2, 0 ); // Erasing the lower element (2,0) // Construction from a lower dense matrix StaticMatrix<double,3UL,3UL> B{ { 3.0, 0.0, 0.0 }, { 8.0, 0.0, 0.0 }, { -2.0, -1.0, 4.0 } }; LowerMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK // Assignment of a non-lower dense matrix StaticMatrix<double,3UL,3UL> D{ { 3.0, 0.0, -2.0 }, { 8.0, 0.0, 0.0 }, { -2.0, -1.0, 4.0 } }; C = D; // Throws an exception; lower matrix invariant would be violated! 
\endcode // The triangular property is also enforced during the construction of triangular custom matrices: // In case the given array of elements does not represent the according triangular matrix type, a // \c std::invalid_argument exception is thrown: \code using blaze::CustomMatrix; using blaze::LowerMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; typedef LowerMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> > CustomLower; // Creating a 3x3 lower custom matrix from a properly initialized array double array[9] = { 1.0, 0.0, 0.0, 2.0, 3.0, 0.0, 4.0, 5.0, 6.0 }; CustomLower A( array, 3UL ); // OK // Attempt to create a second 3x3 lower custom matrix from an uninitialized array CustomLower B( new double[9UL], 3UL, blaze::ArrayDelete() ); // Throws an exception \endcode // Finally, the triangular matrix property is enforced for views (rows, columns, submatrices, ...) // on the triangular matrix. The following example demonstrates that modifying the elements of an // entire row and submatrix of a lower matrix only affects the lower and diagonal matrix elements. // Again, this example uses blaze::LowerMatrix, for examples with other triangular matrix types // see the according class documentations. \code using blaze::DynamicMatrix; using blaze::LowerMatrix; // Setup of the lower matrix // // ( 0 0 0 0 ) // A = ( 1 2 0 0 ) // ( 0 3 0 0 ) // ( 4 0 5 0 ) // LowerMatrix< DynamicMatrix<int> > A( 4 ); A(1,0) = 1; A(1,1) = 2; A(2,1) = 3; A(3,0) = 4; A(3,2) = 5; // Setting the lower and diagonal elements in the 2nd row to 9 results in the matrix // // ( 0 0 0 0 ) // A = ( 1 2 0 0 ) // ( 9 9 9 0 ) // ( 4 0 5 0 ) // row( A, 2 ) = 9; // Setting the lower and diagonal elements in the 1st and 2nd column to 7 results in // // ( 0 0 0 0 ) // A = ( 1 7 0 0 ) // ( 9 7 7 0 ) // ( 4 7 7 0 ) // submatrix( A, 0, 1, 4, 2 ) = 7; \endcode // The next example demonstrates the (compound) assignment to rows/columns and submatrices of // triangular matrices. 
Since only lower/upper and potentially diagonal elements may be modified // the matrix to be assigned must be structured such that the triangular matrix invariant of the // matrix is preserved. Otherwise a \c std::invalid_argument exception is thrown: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::LowerMatrix; using blaze::rowVector; // Setup of two default 4x4 lower matrices LowerMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of a 4-dimensional vector // // v = ( 1 2 3 0 ) // DynamicVector<int,rowVector> v{ 1, 2, 3, 0 }; // OK: Assigning v to the 2nd row of A1 preserves the lower matrix invariant // // ( 0 0 0 0 ) // A1 = ( 0 0 0 0 ) // ( 1 2 3 0 ) // ( 0 0 0 0 ) // row( A1, 2 ) = v; // OK // Error: Assigning v to the 1st row of A1 violates the lower matrix invariant! The element // marked with X cannot be assigned and triggers an exception. // // ( 0 0 0 0 ) // A1 = ( 1 2 X 0 ) // ( 1 2 3 0 ) // ( 0 0 0 0 ) // row( A1, 1 ) = v; // Assignment throws an exception! // Setup of the 3x2 dynamic matrix // // ( 0 0 ) // B = ( 7 0 ) // ( 8 9 ) // DynamicMatrix<int> B( 3UL, 2UL, 0 ); B(1,0) = 7; B(2,0) = 8; B(2,1) = 9; // OK: Assigning B to a submatrix of A2 such that the lower matrix invariant can be preserved // // ( 0 0 0 0 ) // A2 = ( 0 7 0 0 ) // ( 0 8 9 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the lower matrix invariant cannot be // preserved! The elements marked with X cannot be assigned without violating the invariant! // // ( 0 0 0 0 ) // A2 = ( 0 7 X 0 ) // ( 0 8 8 X ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 2UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_triangular_matrices_initialization The Elements of a Dense Triangular Matrix are Always Default Initialized! 
// // Although this results in a small loss of efficiency during the creation of a dense lower or // upper matrix this initialization is important since otherwise the lower/upper matrix property // of dense lower matrices would not be guaranteed: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::UpperMatrix; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // 5x5 row-major lower dynamic matrix with default initialized upper matrix LowerMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); // 7x7 column-major upper dynamic matrix with default initialized lower matrix UpperMatrix< DynamicMatrix<int,columnMajor> > C( 7 ); // 3x3 row-major diagonal dynamic matrix with default initialized lower and upper matrix DiagonalMatrix< DynamicMatrix<int,rowMajor> > D( 3 ); \endcode // \n \subsection adaptors_triangular_matrices_storage Dense Triangular Matrices Store All Elements! // // All dense triangular matrices store all \f$ N \times N \f$ elements, including the immutable // elements in the lower or upper part, respectively. Therefore dense triangular matrices don't // provide any kind of memory reduction! There are two main reasons for this: First, storing also // the zero elements guarantees maximum performance for many algorithms that perform vectorized // operations on the triangular matrices, which is especially true for small dense matrices. // Second, conceptually all triangular adaptors merely restrict the interface to the matrix type // \c MT and do not change the data layout or the underlying matrix type. // // This property matters most for diagonal matrices. 
In order to achieve the perfect combination // of performance and memory consumption for a diagonal matrix it is recommended to use dense // matrices for small diagonal matrices and sparse matrices for large diagonal matrices: \code // Recommendation 1: use dense matrices for small diagonal matrices typedef blaze::DiagonalMatrix< blaze::StaticMatrix<float,3UL,3UL> > SmallDiagonalMatrix; // Recommendation 2: use sparse matrices for large diagonal matrices typedef blaze::DiagonalMatrix< blaze::CompressedMatrix<float> > LargeDiagonalMatrix; \endcode // \n \subsection adaptors_triangular_matrices_scaling Unitriangular Matrices Cannot Be Scaled! // // Since the diagonal elements of a unitriangular matrix have a fixed value of 1 it is not possible // to self-scale such a matrix: \code using blaze::DynamicMatrix; using blaze::UniLowerMatrix; UniLowerMatrix< DynamicMatrix<int> > A( 4 ); A *= 2; // Compilation error; Scale operation is not available on an unilower matrix A /= 2; // Compilation error; Scale operation is not available on an unilower matrix A.scale( 2 ); // Compilation error; Scale function is not available on an unilower matrix A = A * 2; // Throws an exception; Invalid assignment of non-unilower matrix A = A / 2; // Throws an exception; Invalid assignment of non-unilower matrix \endcode // \n \section adaptors_triangular_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A lower and upper triangular matrix can participate in numerical operations in any way any other // dense or sparse matrix can participate. It can also be combined with any other dense or sparse // vector or matrix. 
The following code example gives an impression of the use of blaze::LowerMatrix // and blaze::UpperMatrix within arithmetic operations: \code using blaze::LowerMatrix; using blaze::UpperMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; DynamicMatrix<double,rowMajor> A( 3, 3 ); CompressedMatrix<double,rowMajor> B( 3, 3 ); LowerMatrix< DynamicMatrix<double,rowMajor> > C( 3 ); UpperMatrix< CompressedMatrix<double,rowMajor> > D( 3 ); LowerMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E; UpperMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > F; E = A + B; // Matrix addition and assignment to a row-major lower matrix F = C - D; // Matrix subtraction and assignment to a column-major upper matrix F = A * D; // Matrix multiplication between a dense and a sparse matrix C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of matrix B F = C * 2.0; // Scaling of matrix C E += A - B; // Addition assignment F -= C + D; // Subtraction assignment F *= A * D; // Multiplication assignment \endcode // Note that diagonal, unitriangular and strictly triangular matrix types can be used in the same // way, but may pose some additional restrictions (see the according class documentations). // // // \n \section adaptors_triangular_matrices_block_matrices Triangular Block Matrices // <hr> // // It is also possible to use triangular block matrices: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::UpperMatrix; // Definition of a 5x5 lower block matrix based on DynamicMatrix LowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 ); // Definition of a 7x7 upper block matrix based on CompressedMatrix UpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 ); \endcode // Also in this case the triangular matrix invariant is enforced, i.e. 
it is not possible to // manipulate elements in the upper part (lower triangular matrix) or the lower part (upper // triangular matrix) of the matrix: \code const StaticMatrix<int,3UL,3UL> C{ { 1, -4, 5 }, { 6, 8, -3 }, { 2, -1, 2 } }; A(2,4)(1,1) = -5; // Invalid manipulation of upper matrix element; Results in an exception B.insert( 4, 2, C ); // Invalid insertion of the elements (4,2); Results in an exception \endcode // Note that unitriangular matrices are restricted to numeric element types and therefore cannot // be used for block matrices: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::UniLowerMatrix; using blaze::UniUpperMatrix; // Compilation error: lower unitriangular matrices are restricted to numeric element types UniLowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 ); // Compilation error: upper unitriangular matrices are restricted to numeric element types UniUpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 ); \endcode // For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices. // // // \n \section adaptors_triangular_matrices_performance Performance Considerations // <hr> // // The \b Blaze library tries to exploit the properties of lower and upper triangular matrices // whenever and wherever possible. Therefore using triangular matrices instead of a general // matrices can result in a considerable performance improvement. However, there are also // situations when using a triangular matrix introduces some overhead. The following examples // demonstrate several common situations where triangular matrices can positively or negatively // impact performance. 
// // \n \subsection adaptors_triangular_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the // fact that either the lower or upper part of the matrix contains only default elements and // restrict the algorithm to the non-zero elements. The following example demonstrates this by // means of a dense matrix/dense matrix multiplication with lower triangular matrices: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; using blaze::columnMajor; LowerMatrix< DynamicMatrix<double,rowMajor> > A; LowerMatrix< DynamicMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // In comparison to a general matrix multiplication, the performance advantage is significant, // especially for large matrices. Therefore is it highly recommended to use the blaze::LowerMatrix // and blaze::UpperMatrix adaptors when a matrix is known to be lower or upper triangular, // respectively. Note however that the performance advantage is most pronounced for dense matrices // and much less so for sparse matrices. // // \n \subsection adaptors_triangular_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar performance improvement can be gained when using a triangular matrix in a matrix/vector // multiplication: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; LowerMatrix< DynamicMatrix<double,rowMajor> > A; DynamicVector<double,columnVector> x, y; // ... Resizing and initialization y = A * x; \endcode // In this example, \b Blaze also exploits the structure of the matrix and approx. halves the // runtime of the multiplication. 
Also in case of matrix/vector multiplications the performance // improvement is most pronounced for dense matrices and much less so for sparse matrices. // // \n \subsection adaptors_triangular_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a triangular matrix on the right-hand side of an assignment (i.e. for // read access), which introduces absolutely no performance penalty, using a triangular matrix // on the left-hand side of an assignment (i.e. for write access) may introduce additional // overhead when it is assigned a general matrix, which is not triangular at compile time: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; LowerMatrix< DynamicMatrix<double> > A, C; DynamicMatrix<double> B; B = A; // Only read-access to the lower matrix; no performance penalty C = A; // Assignment of a lower matrix to another lower matrix; no runtime overhead C = B; // Assignment of a general matrix to a lower matrix; some runtime overhead \endcode // When assigning a general (potentially not lower triangular) matrix to a lower matrix or a // general (potentially not upper triangular) matrix to an upper matrix it is necessary to check // whether the matrix is lower or upper at runtime in order to guarantee the triangular property // of the matrix. In case it turns out to be lower or upper, respectively, it is assigned as // efficiently as possible, if it is not, an exception is thrown. 
In order to prevent this runtime // overhead it is therefore generally advisable to assign lower or upper triangular matrices to // other lower or upper triangular matrices.\n // In this context it is especially noteworthy that the addition, subtraction, and multiplication // of two triangular matrices of the same structure always results in another triangular matrix: \code LowerMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a lower matrix; no runtime overhead C = A - B; // Results in a lower matrix; no runtime overhead C = A * B; // Results in a lower matrix; no runtime overhead \endcode \code UpperMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in an upper matrix; no runtime overhead C = A - B; // Results in an upper matrix; no runtime overhead C = A * B; // Results in an upper matrix; no runtime overhead \endcode // \n Previous: \ref adaptors_hermitian_matrices &nbsp; &nbsp; Next: \ref views */ //************************************************************************************************* //**Views****************************************************************************************** /*!\page views Views // // \tableofcontents // // // \section views_general General Concepts // <hr> // // Views represent parts of a vector or matrix, such as a subvector, a submatrix, or a specific // row or column of a matrix. As such, views act as a reference to a specific part of a vector // or matrix. This reference is valid and can be used in every way as any other vector or matrix // can be used as long as the referenced vector or matrix is not resized or entirely destroyed. // Views also act as an alias to the elements of the vector or matrix: Changes made to the elements // (e.g. modifying values, inserting or erasing elements) via the view are immediately visible in // the vector or matrix and changes made via the vector or matrix are immediately visible in the // view. 
// // The \b Blaze library provides the following views on vectors and matrices: // // Vector views: // - \ref views_subvectors // // Matrix views: // - \ref views_submatrices // - \ref views_rows // - \ref views_columns // // // \n \section views_examples Examples \code using blaze::DynamicMatrix; using blaze::StaticVector; using blaze::rowVector; // Setup of the 3x5 row-major matrix // // ( 1 0 -2 3 0 ) // ( 0 2 5 -1 -1 ) // ( 1 0 0 2 1 ) // DynamicMatrix<int> A{ { 1, 0, -2, 3, 0 }, { 0, 2, 5, -1, -1 }, { 1, 0, 0, 2, 1 } }; // Setup of the 2-dimensional row vector // // ( 18 19 ) // StaticVector<int,2UL,rowVector> vec{ 18, 19 }; // Assigning to the elements (1,2) and (1,3) via a subvector of a row // // ( 1 0 -2 3 0 ) // ( 0 2 18 19 -1 ) // ( 1 0 0 2 1 ) // subvector( row( A, 1UL ), 2UL, 2UL ) = vec; \endcode // \n Previous: \ref adaptors_triangular_matrices &nbsp; &nbsp; Next: \ref views_subvectors */ //************************************************************************************************* //**Subvectors************************************************************************************* /*!\page views_subvectors Subvectors // // \tableofcontents // // // Subvectors provide views on a specific part of a dense or sparse vector. As such, subvectors // act as a reference to a specific range within a vector. This reference is valid and can be // used in every way any other dense or sparse vector can be used as long as the vector containing // the subvector is not resized or entirely destroyed. The subvector also acts as an alias to the // vector elements in the specified range: Changes made to the elements (e.g. modifying values, // inserting or erasing elements) are immediately visible in the vector and changes made via the // vector are immediately visible in the subvector. // // // \n \section views_subvectors_class The Subvector Class Template // <hr> // // The blaze::Subvector class template represents a view on a specific subvector of a dense or // sparse vector primitive. 
It can be included via the header file \code #include <blaze/math/Subvector.h> \endcode // The type of the vector is specified via two template parameters: \code template< typename VT, bool AF > class Subvector; \endcode // - \c VT: specifies the type of the vector primitive. Subvector can be used with every vector // primitive or view, but does not work with any vector expression type. // - \c AF: the alignment flag specifies whether the subvector is aligned (blaze::aligned) or // unaligned (blaze::unaligned). The default value is blaze::unaligned. // // // \n \section views_subvectors_setup Setup of Subvectors // <hr> // // A view on a dense or sparse subvector can be created very conveniently via the \c subvector() // function. This view can be treated as any other vector, i.e. it can be assigned to, it can // be copied from, and it can be used in arithmetic operations. A subvector created from a row // vector can be used as any other row vector, a subvector created from a column vector can be // used as any other column vector. The view can also be used on both sides of an assignment: // The subvector can either be used as an alias to grant write access to a specific subvector // of a vector primitive on the left-hand side of an assignment or to grant read-access to a // specific subvector of a vector primitive or expression on the right-hand side of an assignment. // The following example demonstrates this in detail: \code typedef blaze::DynamicVector<double,blaze::rowVector> DenseVectorType; typedef blaze::CompressedVector<int,blaze::rowVector> SparseVectorType; DenseVectorType d1, d2; SparseVectorType s1, s2; // ... 
Resizing and initialization // Creating a view on the first ten elements of the dense vector d1 blaze::Subvector<DenseVectorType> dsv = subvector( d1, 0UL, 10UL ); // Creating a view on the second ten elements of the sparse vector s1 blaze::Subvector<SparseVectorType> ssv = subvector( s1, 10UL, 10UL ); // Creating a view on the addition of d2 and s2 dsv = subvector( d2 + s2, 5UL, 10UL ); // Creating a view on the multiplication of d2 and s2 ssv = subvector( d2 * s2, 2UL, 10UL ); \endcode // The \c subvector() function can be used on any dense or sparse vector, including expressions, // as demonstrated in the example. Note however that a blaze::Subvector can only be instantiated // with a dense or sparse vector primitive, i.e. with types that can be written, and not with an // expression type. // // // \n \section views_subvectors_common_operations Common Operations // <hr> // // A subvector view can be used like any other dense or sparse vector. For instance, the current // number of elements can be obtained via the \c size() function, the current capacity via the // \c capacity() function, and the number of non-zero elements via the \c nonZeros() function. // However, since subvectors are references to a specific range of a vector, several operations // are not possible on views, such as resizing and swapping. The following example shows this by // means of a dense subvector view: \code typedef blaze::DynamicVector<int,blaze::rowVector> VectorType; typedef blaze::Subvector<VectorType> SubvectorType; VectorType v( 42UL ); // ... 
Resizing and initialization // Creating a view on the range [5..15] of vector v SubvectorType sv = subvector( v, 5UL, 10UL ); sv.size(); // Returns the number of elements in the subvector sv.capacity(); // Returns the capacity of the subvector sv.nonZeros(); // Returns the number of non-zero elements contained in the subvector sv.resize( 84UL ); // Compilation error: Cannot resize a subvector of a vector SubvectorType sv2 = subvector( v, 15UL, 10UL ); swap( sv, sv2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_subvectors_element_access Element Access // <hr> // // The elements of a subvector can be directly accessed via the subscript operator: \code typedef blaze::DynamicVector<double,blaze::rowVector> VectorType; VectorType v; // ... Resizing and initialization // Creating an 8-dimensional subvector, starting from index 4 blaze::Subvector<VectorType> sv = subvector( v, 4UL, 8UL ); // Setting the 1st element of the subvector, which corresponds to // the element at index 5 in vector v sv[1] = 2.0; \endcode \code typedef blaze::CompressedVector<double,blaze::rowVector> VectorType; VectorType v; // ... Resizing and initialization // Creating an 8-dimensional subvector, starting from index 4 blaze::Subvector<VectorType> sv = subvector( v, 4UL, 8UL ); // Setting the 1st element of the subvector, which corresponds to // the element at index 5 in vector v sv[1] = 2.0; \endcode // The numbering of the subvector elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the specified size of the subvector. Alternatively, the elements of a subvector can // be traversed via iterators. 
Just as with vectors, in case of non-const subvectors, \c begin() // and \c end() return an Iterator, which allows a manipulation of the non-zero values, in case // of constant subvectors a ConstIterator is returned: \code typedef blaze::DynamicVector<int,blaze::rowVector> VectorType; typedef blaze::Subvector<VectorType> SubvectorType; VectorType v( 256UL ); // ... Resizing and initialization // Creating a reference to a specific subvector of the dense vector v SubvectorType sv = subvector( v, 16UL, 64UL ); for( SubvectorType::Iterator it=sv.begin(); it!=sv.end(); ++it ) { *it = ...; // OK: Write access to the dense subvector value. ... = *it; // OK: Read access to the dense subvector value. } for( SubvectorType::ConstIterator it=sv.begin(); it!=sv.end(); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the dense subvector value. } \endcode \code typedef blaze::CompressedVector<int,blaze::rowVector> VectorType; typedef blaze::Subvector<VectorType> SubvectorType; VectorType v( 256UL ); // ... Resizing and initialization // Creating a reference to a specific subvector of the sparse vector v SubvectorType sv = subvector( v, 16UL, 64UL ); for( SubvectorType::Iterator it=sv.begin(); it!=sv.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } for( SubvectorType::ConstIterator it=sv.begin(); it!=sv.end(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... 
= it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_subvectors_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse subvector can be done by several alternative functions. // The following example demonstrates all options: \code typedef blaze::CompressedVector<double,blaze::rowVector> VectorType; VectorType v( 256UL ); // Non-initialized vector of size 256 typedef blaze::Subvector<VectorType> SubvectorType; SubvectorType sv( subvector( v, 10UL, 60UL ) ); // View on the range [10..69] of v // The subscript operator provides access to all possible elements of the sparse subvector, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse subvector, the element is inserted into the // subvector. sv[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the vector it is inserted into the vector, if it is already contained // in the vector its value is modified. sv.set( 45UL, -1.2 ); // An alternative for inserting elements into the subvector is the insert() function. However, // it inserts the element only in case the element is not already contained in the subvector. sv.insert( 50UL, 3.7 ); // Just as in case of vectors, elements can also be inserted via the append() function. In // case of subvectors, append() also requires that the appended element's index is strictly // larger than the currently largest non-zero index of the subvector and that the subvector's // capacity is large enough to hold the new element. Note however that due to the nature of // a subvector, which may be an alias to the middle of a sparse vector, the append() function // does not work as efficiently for a subvector as it does for a vector. 
sv.reserve( 10UL ); sv.append( 51UL, -2.1 ); \endcode // \n \section views_subvectors_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse subvectors can be used in all arithmetic operations that any other dense // or sparse vector can be used in. The following example gives an impression of the use of dense // subvectors within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse subvectors with // fitting element types: \code typedef blaze::DynamicVector<double,blaze::rowVector> DenseVectorType; typedef blaze::CompressedVector<double,blaze::rowVector> SparseVectorType; DenseVectorType d1, d2, d3; SparseVectorType s1, s2; // ... Resizing and initialization typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType; DenseMatrixType A; typedef blaze::Subvector<DenseVectorType> SubvectorType; SubvectorType dsv( subvector( d1, 0UL, 10UL ) ); // View on the range [0..9] of vector d1 dsv = d2; // Dense vector initialization of the range [0..9] subvector( d1, 10UL, 10UL ) = s1; // Sparse vector initialization of the range [10..19] d3 = dsv + d2; // Dense vector/dense vector addition s2 = s1 + subvector( d1, 10UL, 10UL ); // Sparse vector/dense vector addition d2 = dsv * subvector( d1, 20UL, 10UL ); // Component-wise vector multiplication subvector( d1, 3UL, 4UL ) *= 2.0; // In-place scaling of the range [3..6] d2 = subvector( d1, 7UL, 3UL ) * 2.0; // Scaling of the range [7..9] d2 = 2.0 * subvector( d1, 7UL, 3UL ); // Scaling of the range [7..9] subvector( d1, 0UL , 10UL ) += d2; // Addition assignment subvector( d1, 10UL, 10UL ) -= s2; // Subtraction assignment subvector( d1, 20UL, 10UL ) *= dsv; // Multiplication assignment double scalar = subvector( d1, 5UL, 10UL ) * trans( s1 ); // Scalar/dot/inner product between two vectors A = trans( s1 ) * subvector( d1, 4UL, 16UL ); // Outer product between two vectors \endcode // \n 
\section views_aligned_subvectors Aligned Subvectors // <hr> // // Usually subvectors can be defined anywhere within a vector. They may start at any position and // may have an arbitrary size (only restricted by the size of the underlying vector). However, in // contrast to vectors themselves, which are always properly aligned in memory and therefore can // provide maximum performance, this means that subvectors in general have to be considered to be // unaligned. This can be made explicit by the blaze::unaligned flag: \code using blaze::unaligned; typedef blaze::DynamicVector<double,blaze::rowVector> DenseVectorType; DenseVectorType x; // ... Resizing and initialization // Identical creations of an unaligned subvector in the range [8..23] blaze::Subvector<DenseVectorType> sv1 = subvector ( x, 8UL, 16UL ); blaze::Subvector<DenseVectorType> sv2 = subvector<unaligned>( x, 8UL, 16UL ); blaze::Subvector<DenseVectorType,unaligned> sv3 = subvector ( x, 8UL, 16UL ); blaze::Subvector<DenseVectorType,unaligned> sv4 = subvector<unaligned>( x, 8UL, 16UL ); \endcode // All of these calls to the \c subvector() function are identical. Whether the alignment flag is // explicitly specified or not, it always returns an unaligned subvector. Whereas this may provide // full flexibility in the creation of subvectors, this might result in performance disadvantages // in comparison to vector primitives (even in case the specified subvector could be aligned). // Whereas vector primitives are guaranteed to be properly aligned and therefore provide maximum // performance in all operations, a general view on a vector might not be properly aligned. This // may cause a performance penalty on some platforms and/or for some operations. // // However, it is also possible to create aligned subvectors. 
Aligned subvectors are identical to // unaligned subvectors in all aspects, except that they may pose additional alignment restrictions // and therefore have less flexibility during creation, but don't suffer from performance penalties // and provide the same performance as the underlying vector. Aligned subvectors are created by // explicitly specifying the blaze::aligned flag: \code using blaze::aligned; // Creating an aligned dense subvector in the range [8..23] blaze::Subvector<DenseVectorType,aligned> sv = subvector<aligned>( x, 8UL, 16UL ); \endcode // The alignment restrictions refer to system dependent address restrictions for the used element // type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the // first element of the subvector must be aligned. The following source code gives some examples // for a double precision dynamic vector, assuming that AVX is available, which packs 4 \c double // values into a SIMD vector: \code using blaze::aligned; using blaze::columnVector; typedef blaze::DynamicVector<double,columnVector> VectorType; typedef blaze::Subvector<VectorType,aligned> SubvectorType; VectorType d( 17UL ); // ... Resizing and initialization // OK: Starts at the beginning, i.e. the first element is aligned SubvectorType dsv1 = subvector<aligned>( d, 0UL, 13UL ); // OK: Start index is a multiple of 4, i.e. the first element is aligned SubvectorType dsv2 = subvector<aligned>( d, 4UL, 7UL ); // OK: The start index is a multiple of 4 and the subvector includes the last element SubvectorType dsv3 = subvector<aligned>( d, 8UL, 9UL ); // Error: Start index is not a multiple of 4, i.e. the first element is not aligned SubvectorType dsv4 = subvector<aligned>( d, 5UL, 8UL ); \endcode // Note that the discussed alignment restrictions are only valid for aligned dense subvectors. // In contrast, aligned sparse subvectors at this time don't pose any additional restrictions. 
// Therefore aligned and unaligned sparse subvectors are truly fully identical. Still, in case // the blaze::aligned flag is specified during setup, an aligned subvector is created: \code using blaze::aligned; typedef blaze::CompressedVector<double,blaze::rowVector> SparseVectorType; SparseVectorType x; // ... Resizing and initialization // Creating an aligned subvector in the range [8..23] blaze::Subvector<SparseVectorType,aligned> sv = subvector<aligned>( x, 8UL, 16UL ); \endcode // \n \section views_subvectors_on_subvectors Subvectors on Subvectors // <hr> // // It is also possible to create a subvector view on another subvector. In this context it is // important to remember that the type returned by the \c subvector() function is the same type // as the type of the given subvector, not a nested subvector type, since the view on a subvector // is just another view on the underlying vector: \code typedef blaze::DynamicVector<double,blaze::rowVector> VectorType; typedef blaze::Subvector<VectorType> SubvectorType; VectorType d1; // ... Resizing and initialization // Creating a subvector view on the dense vector d1 SubvectorType sv1 = subvector( d1, 5UL, 10UL ); // Creating a subvector view on the dense subvector sv1 SubvectorType sv2 = subvector( sv1, 1UL, 5UL ); \endcode // \n Previous: \ref views &nbsp; &nbsp; Next: \ref views_submatrices */ //************************************************************************************************* //**Submatrices************************************************************************************ /*!\page views_submatrices Submatrices // // \tableofcontents // // // Submatrices provide views on a specific part of a dense or sparse matrix just as subvectors // provide views on specific parts of vectors. As such, submatrices act as a reference to a // specific block within a matrix. 
This reference is valid and can be used in every way any // other dense or sparse matrix can be used as long as the matrix containing the submatrix is // not resized or entirely destroyed. The submatrix also acts as an alias to the matrix elements // in the specified block: Changes made to the elements (e.g. modifying values, inserting or // erasing elements) are immediately visible in the matrix and changes made via the matrix are // immediately visible in the submatrix. // // // \n \section views_submatrices_class The Submatrix Class Template // <hr> // // The blaze::Submatrix class template represents a view on a specific submatrix of a dense or // sparse matrix primitive. It can be included via the header file \code #include <blaze/math/Submatrix.h> \endcode // The type of the matrix is specified via two template parameters: \code template< typename MT, bool AF > class Submatrix; \endcode // - \c MT: specifies the type of the matrix primitive. Submatrix can be used with every matrix // primitive, but does not work with any matrix expression type. // - \c AF: the alignment flag specifies whether the submatrix is aligned (blaze::aligned) or // unaligned (blaze::unaligned). The default value is blaze::unaligned. // // // \n \section views_submatrices_setup Setup of Submatrices // <hr> // // A view on a submatrix can be created very conveniently via the \c submatrix() function. // This view can be treated as any other matrix, i.e. it can be assigned to, it can be copied // from, and it can be used in arithmetic operations. A submatrix created from a row-major // matrix will itself be a row-major matrix, a submatrix created from a column-major matrix // will be a column-major matrix. 
The view can also be used on both sides of an assignment: // The submatrix can either be used as an alias to grant write access to a specific submatrix // of a matrix primitive on the left-hand side of an assignment or to grant read-access to // a specific submatrix of a matrix primitive or expression on the right-hand side of an // assignment. The following example demonstrates this in detail: \code typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType; typedef blaze::CompressedMatrix<int,blaze::columnMajor> SparseMatrixType; DenseMatrixType D1, D2; SparseMatrixType S1, S2; // ... Resizing and initialization // Creating a view on the first 8x16 block of the dense matrix D1 blaze::Submatrix<DenseMatrixType> dsm = submatrix( D1, 0UL, 0UL, 8UL, 16UL ); // Creating a view on the second 8x16 block of the sparse matrix S1 blaze::Submatrix<SparseMatrixType> ssm = submatrix( S1, 0UL, 16UL, 8UL, 16UL ); // Creating a view on the addition of D2 and S2 dsm = submatrix( D2 + S2, 5UL, 10UL, 8UL, 16UL ); // Creating a view on the multiplication of D2 and S2 ssm = submatrix( D2 * S2, 7UL, 13UL, 8UL, 16UL ); \endcode // \n \section views_submatrices_common_operations Common Operations // <hr> // // The current size of the matrix, i.e. the number of rows or columns can be obtained via the // \c rows() and \c columns() functions, the current total capacity via the \c capacity() function, // and the number of non-zero elements via the \c nonZeros() function. However, since submatrices // are views on a specific submatrix of a matrix, several operations are not possible on views, // such as resizing and swapping: \code typedef blaze::DynamicMatrix<int,blaze::rowMajor> MatrixType; typedef blaze::Submatrix<MatrixType> SubmatrixType; MatrixType A; // ... 
Resizing and initialization // Creating a view on an 8x12 submatrix of matrix A SubmatrixType sm = submatrix( A, 0UL, 0UL, 8UL, 12UL ); sm.rows(); // Returns the number of rows of the submatrix sm.columns(); // Returns the number of columns of the submatrix sm.capacity(); // Returns the capacity of the submatrix sm.nonZeros(); // Returns the number of non-zero elements contained in the submatrix sm.resize( 10UL, 8UL ); // Compilation error: Cannot resize a submatrix of a matrix SubmatrixType sm2 = submatrix( A, 8UL, 0UL, 12UL, 8UL ); swap( sm, sm2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_submatrices_element_access Element Access // <hr> // // The elements of a submatrix can be directly accessed with the function call operator: \code typedef blaze::DynamicMatrix<double,blaze::rowMajor> MatrixType; MatrixType A; // ... Resizing and initialization // Creating a 8x8 submatrix, starting from position (4,4) blaze::Submatrix<MatrixType> sm = submatrix( A, 4UL, 4UL, 8UL, 8UL ); // Setting the element (0,0) of the submatrix, which corresponds to // the element at position (4,4) in matrix A sm(0,0) = 2.0; \endcode \code typedef blaze::CompressedMatrix<double,blaze::rowMajor> MatrixType; MatrixType A; // ... Resizing and initialization // Creating a 8x8 submatrix, starting from position (4,4) blaze::Submatrix<MatrixType> sm = submatrix( A, 4UL, 4UL, 8UL, 8UL ); // Setting the element (0,0) of the submatrix, which corresponds to // the element at position (4,4) in matrix A sm(0,0) = 2.0; \endcode // Alternatively, the elements of a submatrix can be traversed via (const) iterators. 
Just as // with matrices, in case of non-const submatrices, \c begin() and \c end() return an Iterator, // which allows a manipulation of the non-zero values, in case of constant submatrices a // ConstIterator is returned: \code typedef blaze::DynamicMatrix<int,blaze::rowMajor> MatrixType; typedef blaze::Submatrix<MatrixType> SubmatrixType; MatrixType A( 256UL, 512UL ); // ... Resizing and initialization // Creating a reference to a specific submatrix of the dense matrix A SubmatrixType sm = submatrix( A, 16UL, 16UL, 64UL, 128UL ); // Traversing the elements of the 0th row via iterators to non-const elements for( SubmatrixType::Iterator it=sm.begin(0); it!=sm.end(0); ++it ) { *it = ...; // OK: Write access to the dense submatrix value. ... = *it; // OK: Read access to the dense submatrix value. } // Traversing the elements of the 1st row via iterators to const elements for( SubmatrixType::ConstIterator it=sm.begin(1); it!=sm.end(1); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the dense submatrix value. } \endcode \code typedef blaze::CompressedMatrix<int,blaze::rowMajor> MatrixType; typedef blaze::Submatrix<MatrixType> SubmatrixType; MatrixType A( 256UL, 512UL ); // ... Resizing and initialization // Creating a reference to a specific submatrix of the sparse matrix A SubmatrixType sm = submatrix( A, 16UL, 16UL, 64UL, 128UL ); // Traversing the elements of the 0th row via iterators to non-const elements for( SubmatrixType::Iterator it=sm.begin(0); it!=sm.end(0); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. 
} // Traversing the elements of the 1st row via iterators to const elements for( SubmatrixType::ConstIterator it=sm.begin(1); it!=sm.end(1); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_submatrices_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse submatrix can be done by several alternative functions. // The following example demonstrates all options: \code typedef blaze::CompressedMatrix<double,blaze::rowMajor> MatrixType; MatrixType A( 256UL, 512UL ); // Non-initialized matrix of size 256x512 typedef blaze::Submatrix<MatrixType> SubmatrixType; SubmatrixType sm = submatrix( A, 10UL, 10UL, 16UL, 16UL ); // View on a 16x16 submatrix of A // The function call operator provides access to all possible elements of the sparse submatrix, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse submatrix, the element is inserted into the // submatrix. sm(2,4) = 2.0; // The second operation for inserting elements is the set() function. In case the element is // not contained in the submatrix it is inserted into the submatrix, if it is already contained // in the submatrix its value is modified. sm.set( 2UL, 5UL, -1.2 ); // An alternative for inserting elements into the submatrix is the insert() function. However, // it inserts the element only in case the element is not already contained in the submatrix. sm.insert( 2UL, 6UL, 3.7 ); // Just as in case of sparse matrices, elements can also be inserted via the append() function. 
// In case of submatrices, append() also requires that the appended element's index is strictly // larger than the currently largest non-zero index in the according row or column of the // submatrix and that the according row's or column's capacity is large enough to hold the new // element. Note however that due to the nature of a submatrix, which may be an alias to the // middle of a sparse matrix, the append() function does not work as efficiently for a // submatrix as it does for a matrix. sm.reserve( 2UL, 10UL ); sm.append( 2UL, 10UL, -2.1 ); \endcode // \n \section views_submatrices_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse submatrices can be used in all arithmetic operations that any other dense // or sparse matrix can be used in. The following example gives an impression of the use of dense // submatrices within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse matrices with // fitting element types: \code typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType; typedef blaze::CompressedMatrix<double,blaze::rowMajor> SparseMatrixType; DenseMatrixType D1, D2, D3; SparseMatrixType S1, S2; typedef blaze::CompressedVector<double,blaze::columnVector> SparseVectorType; SparseVectorType a, b; // ... 
Resizing and initialization typedef blaze::Submatrix<DenseMatrixType> SubmatrixType; SubmatrixType sm = submatrix( D1, 0UL, 0UL, 8UL, 8UL ); // View on the 8x8 submatrix of matrix D1 // starting from row 0 and column 0 submatrix( D1, 0UL, 8UL, 8UL, 8UL ) = D2; // Dense matrix initialization of the 8x8 submatrix // starting in row 0 and column 8 sm = S1; // Sparse matrix initialization of the second 8x8 submatrix D3 = sm + D2; // Dense matrix/dense matrix addition S2 = S1 - submatrix( D1, 8UL, 0UL, 8UL, 8UL ); // Sparse matrix/dense matrix subtraction D2 = sm * submatrix( D1, 8UL, 8UL, 8UL, 8UL ); // Dense matrix/dense matrix multiplication submatrix( D1, 8UL, 0UL, 8UL, 8UL ) *= 2.0; // In-place scaling of a submatrix of D1 D2 = submatrix( D1, 8UL, 8UL, 8UL, 8UL ) * 2.0; // Scaling of a submatrix of D1 D2 = 2.0 * sm; // Scaling of a submatrix of D1 submatrix( D1, 0UL, 8UL, 8UL, 8UL ) += D2; // Addition assignment submatrix( D1, 8UL, 0UL, 8UL, 8UL ) -= S1; // Subtraction assignment submatrix( D1, 8UL, 8UL, 8UL, 8UL ) *= sm; // Multiplication assignment a = submatrix( D1, 4UL, 4UL, 8UL, 8UL ) * b; // Dense matrix/sparse vector multiplication \endcode // \n \section views_aligned_submatrices Aligned Submatrices // <hr> // // Usually submatrices can be defined anywhere within a matrix. They may start at any position and // may have an arbitrary extension (only restricted by the extension of the underlying matrix). // However, in contrast to matrices themselves, which are always properly aligned in memory and // therefore can provide maximum performance, this means that submatrices in general have to be // considered to be unaligned. This can be made explicit by the blaze::unaligned flag: \code using blaze::unaligned; typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType; DenseMatrixType A; // ... 
Resizing and initialization // Identical creations of an unaligned submatrix of size 8x8, starting in row 0 and column 0 blaze::Submatrix<DenseMatrixType> sm1 = submatrix ( A, 0UL, 0UL, 8UL, 8UL ); blaze::Submatrix<DenseMatrixType> sm2 = submatrix<unaligned>( A, 0UL, 0UL, 8UL, 8UL ); blaze::Submatrix<DenseMatrixType,unaligned> sm3 = submatrix ( A, 0UL, 0UL, 8UL, 8UL ); blaze::Submatrix<DenseMatrixType,unaligned> sm4 = submatrix<unaligned>( A, 0UL, 0UL, 8UL, 8UL ); \endcode // All of these calls to the \c submatrix() function are identical. Whether the alignment flag is // explicitly specified or not, it always returns an unaligned submatrix. Whereas this may provide // full flexibility in the creation of submatrices, this might result in performance disadvantages // in comparison to matrix primitives (even in case the specified submatrix could be aligned). // Whereas matrix primitives are guaranteed to be properly aligned and therefore provide maximum // performance in all operations, a general view on a matrix might not be properly aligned. This // may cause a performance penalty on some platforms and/or for some operations. // // However, it is also possible to create aligned submatrices. Aligned submatrices are identical to // unaligned submatrices in all aspects, except that they may pose additional alignment restrictions // and therefore have less flexibility during creation, but don't suffer from performance penalties // and provide the same performance as the underlying matrix. Aligned submatrices are created by // explicitly specifying the blaze::aligned flag: \code using blaze::aligned; // Creating an aligned submatrix of size 8x8, starting in row 0 and column 0 blaze::Submatrix<DenseMatrixType,aligned> sv = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL ); \endcode // The alignment restrictions refer to system dependent address restrictions for the used element // type and the available vectorization mode (SSE, AVX, ...). 
In order to be properly aligned the // first element of each row/column of the submatrix must be aligned. The following source code // gives some examples for a double precision row-major dynamic matrix, assuming that padding is // enabled and that AVX is available, which packs 4 \c double values into a SIMD vector: \code using blaze::aligned; using blaze::rowMajor; typedef blaze::DynamicMatrix<double,rowMajor> MatrixType; typedef blaze::Submatrix<MatrixType,aligned> SubmatrixType; MatrixType D( 13UL, 17UL ); // ... Resizing and initialization // OK: Starts at position (0,0), i.e. the first element of each row is aligned (due to padding) SubmatrixType dsm1 = submatrix<aligned>( D, 0UL, 0UL, 7UL, 11UL ); // OK: First column is a multiple of 4, i.e. the first element of each row is aligned (due to padding) SubmatrixType dsm2 = submatrix<aligned>( D, 3UL, 12UL, 8UL, 16UL ); // OK: First column is a multiple of 4 and the submatrix includes the last row and column SubmatrixType dsm3 = submatrix<aligned>( D, 4UL, 0UL, 9UL, 17UL ); // Error: First column is not a multiple of 4, i.e. the first element is not aligned SubmatrixType dsm4 = submatrix<aligned>( D, 2UL, 3UL, 12UL, 12UL ); \endcode // Note that the discussed alignment restrictions are only valid for aligned dense submatrices. // In contrast, aligned sparse submatrices at this time don't pose any additional restrictions. // Therefore aligned and unaligned sparse submatrices are truly fully identical. Still, in case // the blaze::aligned flag is specified during setup, an aligned submatrix is created: \code using blaze::aligned; typedef blaze::CompressedMatrix<double,blaze::rowMajor> SparseMatrixType; SparseMatrixType A; // ... 
Resizing and initialization // Creating an aligned submatrix of size 8x8, starting in row 0 and column 0 blaze::Submatrix<SparseMatrixType,aligned> sv = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL ); \endcode // \n \section views_submatrices_on_submatrices Submatrices on Submatrices // <hr> // // It is also possible to create a submatrix view on another submatrix. In this context it is // important to remember that the type returned by the \c submatrix() function is the same type // as the type of the given submatrix, since the view on a submatrix is just another view on the // underlying matrix: \code typedef blaze::DynamicMatrix<double,blaze::rowMajor> MatrixType; typedef blaze::Submatrix<MatrixType> SubmatrixType; MatrixType D1; // ... Resizing and initialization // Creating a submatrix view on the dense matrix D1 SubmatrixType sm1 = submatrix( D1, 4UL, 4UL, 8UL, 16UL ); // Creating a submatrix view on the dense submatrix sm1 SubmatrixType sm2 = submatrix( sm1, 1UL, 1UL, 4UL, 8UL ); \endcode // \n \section views_submatrices_on_symmetric_matrices Submatrices on Symmetric Matrices // // Submatrices can also be created on symmetric matrices (see the \c SymmetricMatrix class template): \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::Submatrix; typedef SymmetricMatrix< DynamicMatrix<int> > SymmetricDynamicType; typedef Submatrix< SymmetricDynamicType > SubmatrixType; // Setup of a 16x16 symmetric matrix SymmetricDynamicType A( 16UL ); // Creating a dense submatrix of size 8x12, starting in row 2 and column 4 SubmatrixType sm = submatrix( A, 2UL, 4UL, 8UL, 12UL ); \endcode // It is important to note, however, that (compound) assignments to such submatrices have a // special restriction: The symmetry of the underlying symmetric matrix must not be broken! 
// Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry // of the symmetric matrix is preserved. Otherwise a \c std::invalid_argument exception is // thrown: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of two default 4x4 symmetric matrices SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( 1 2 ) // B = ( 3 4 ) // ( 5 6 ) // DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; // OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved // // ( 0 0 1 2 ) // A1 = ( 0 0 3 4 ) // ( 1 3 5 6 ) // ( 2 4 6 0 ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved! // The elements marked with X cannot be assigned unambiguously! // // ( 0 1 2 0 ) // A2 = ( 1 3 X 0 ) // ( 2 X 6 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n Previous: \ref views_subvectors &nbsp; &nbsp; Next: \ref views_rows */ //************************************************************************************************* //**Rows******************************************************************************************* /*!\page views_rows Rows // // \tableofcontents // // // Rows provide views on a specific row of a dense or sparse matrix. As such, rows act as a // reference to a specific row. This reference is valid and can be used in every way any other // row vector can be used as long as the matrix containing the row is not resized or entirely // destroyed. The row also acts as an alias to the row elements: Changes made to the elements // (e.g. modifying values, inserting or erasing elements) are immediately visible in the matrix // and changes made via the matrix are immediately visible in the row. 
// // // \n \section views_rows_class The Row Class Template // <hr> // // The blaze::Row class template represents a reference to a specific row of a dense or sparse // matrix primitive. It can be included via the header file \code #include <blaze/math/Row.h> \endcode // The type of the matrix is specified via template parameter: \code template< typename MT > class Row; \endcode // \c MT specifies the type of the matrix primitive. Row can be used with every matrix primitive, // but does not work with any matrix expression type. // // // \n \section views_rows_setup Setup of Rows // <hr> // // A reference to a dense or sparse row can be created very conveniently via the \c row() function. // This reference can be treated as any other row vector, i.e. it can be assigned to, it can be // copied from, and it can be used in arithmetic operations. The reference can also be used on // both sides of an assignment: The row can either be used as an alias to grant write access to a // specific row of a matrix primitive on the left-hand side of an assignment or to grant read-access // to a specific row of a matrix primitive or expression on the right-hand side of an assignment. // The following two examples demonstrate this for dense and sparse matrices: \code typedef blaze::DynamicVector<double,rowVector> DenseVectorType; typedef blaze::CompressedVector<double,rowVector> SparseVectorType; typedef blaze::DynamicMatrix<double,rowMajor> DenseMatrixType; typedef blaze::CompressedMatrix<double,rowMajor> SparseMatrixType; DenseVectorType x; SparseVectorType y; DenseMatrixType A, B; SparseMatrixType C, D; // ... 
Resizing and initialization // Setting the 2nd row of matrix A to x blaze::Row<DenseMatrixType> row2 = row( A, 2UL ); row2 = x; // Setting the 3rd row of matrix B to y row( B, 3UL ) = y; // Setting x to the 4th row of the result of the matrix multiplication x = row( A * B, 4UL ); // Setting y to the 2nd row of the result of the sparse matrix multiplication y = row( C * D, 2UL ); \endcode // The \c row() function can be used on any dense or sparse matrix, including expressions, as // illustrated by the source code example. However, rows cannot be instantiated for expression // types, but only for matrix primitives, respectively, i.e. for matrix types that offer write // access. // // // \n \section views_rows_common_operations Common Operations // <hr> // // A row view can be used like any other row vector. For instance, the current number of elements // can be obtained via the \c size() function, the current capacity via the \c capacity() function, // and the number of non-zero elements via the \c nonZeros() function. However, since rows are // references to specific rows of a matrix, several operations are not possible on views, such // as resizing and swapping. The following example shows this by means of a dense row view: \code typedef blaze::DynamicMatrix<int,rowMajor> MatrixType; typedef blaze::Row<MatrixType> RowType; MatrixType A( 42UL, 42UL ); // ... 
Resizing and initialization // Creating a reference to the 2nd row of matrix A RowType row2 = row( A, 2UL ); row2.size(); // Returns the number of elements in the row row2.capacity(); // Returns the capacity of the row row2.nonZeros(); // Returns the number of non-zero elements contained in the row row2.resize( 84UL ); // Compilation error: Cannot resize a single row of a matrix RowType row3 = row( A, 3UL ); swap( row2, row3 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_rows_element_access Element Access // <hr> // // The elements of the row can be directly accessed with the subscript operator. The numbering // of the row elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of columns of the referenced matrix. Alternatively, the elements of // a row can be traversed via iterators. Just as with vectors, in case of non-const rows, // \c begin() and \c end() return an Iterator, which allows a manipulation of the non-zero // value, in case of a constant row a ConstIterator is returned: \code typedef blaze::DynamicMatrix<int,rowMajor> MatrixType; typedef blaze::Row<MatrixType> RowType; MatrixType A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st row of matrix A RowType row31 = row( A, 31UL ); for( RowType::Iterator it=row31.begin(); it!=row31.end(); ++it ) { *it = ...; // OK: Write access to the dense row value. ... = *it; // OK: Read access to the dense row value. } for( RowType::ConstIterator it=row31.begin(); it!=row31.end(); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the dense row value. } \endcode \code typedef blaze::CompressedMatrix<int,rowMajor> MatrixType; typedef blaze::Row<MatrixType> RowType; MatrixType A( 128UL, 256UL ); // ... 
Resizing and initialization // Creating a reference to the 31st row of matrix A RowType row31 = row( A, 31UL ); for( RowType::Iterator it=row31.begin(); it!=row31.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } for( RowType::ConstIterator it=row31.begin(); it!=row31.end(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_rows_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse row can be done by several alternative functions. // The following example demonstrates all options: \code typedef blaze::CompressedMatrix<double,blaze::rowMajor> MatrixType; MatrixType A( 10UL, 100UL ); // Non-initialized 10x100 matrix typedef blaze::Row<MatrixType> RowType; RowType row0( row( A, 0UL ) ); // Reference to the 0th row of A // The subscript operator provides access to all possible elements of the sparse row, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse row, the element is inserted into the row. row0[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the row it is inserted into the row, if it is already contained in // the row its value is modified. row0.set( 45UL, -1.2 ); // An alternative for inserting elements into the row is the insert() function. 
However, // it inserts the element only in case the element is not already contained in the row. row0.insert( 50UL, 3.7 ); // A very efficient way to add new elements to a sparse row is the append() function. // Note that append() requires that the appended element's index is strictly larger than // the currently largest non-zero index of the row and that the row's capacity is large // enough to hold the new element. row0.reserve( 10UL ); row0.append( 51UL, -2.1 ); \endcode // \n \section views_rows_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse rows can be used in all arithmetic operations that any other dense or // sparse row vector can be used in. The following example gives an impression of the use of // dense rows within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse rows with // fitting element types: \code blaze::DynamicVector<double,blaze::rowVector> a( 2UL, 2.0 ), b; blaze::CompressedVector<double,blaze::rowVector> c( 2UL ); c[1] = 3.0; typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrix; DenseMatrix A( 4UL, 2UL ); // Non-initialized 4x2 matrix typedef blaze::Row<DenseMatrix> RowType; RowType row0( row( A, 0UL ) ); // Reference to the 0th row of A row0[0] = 0.0; // Manual initialization of the 0th row of A row0[1] = 0.0; row( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st row of A row( A, 2UL ) = a; // Dense vector initialization of the 2nd row of A row( A, 3UL ) = c; // Sparse vector initialization of the 3rd row of A b = row0 + a; // Dense vector/dense vector addition b = c + row( A, 1UL ); // Sparse vector/dense vector addition b = row0 * row( A, 2UL ); // Component-wise vector multiplication row( A, 1UL ) *= 2.0; // In-place scaling of the 1st row b = row( A, 1UL ) * 2.0; // Scaling of the 1st row b = 2.0 * row( A, 1UL ); // Scaling of the 1st row row( A, 2UL ) += a; // Addition 
assignment row( A, 2UL ) -= c; // Subtraction assignment row( A, 2UL ) *= row( A, 0UL ); // Multiplication assignment double scalar = row( A, 1UL ) * trans( c ); // Scalar/dot/inner product between two vectors A = trans( c ) * row( A, 1UL ); // Outer product between two vectors \endcode // \n \section views_rows_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order // <hr> // // Especially noteworthy is that row views can be created for both row-major and column-major // matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly // and the interface of a column-major matrix only allows to traverse a column, via views it is // possible to traverse a row of a column-major matrix or a column of a row-major matrix. For // instance: \code typedef blaze::CompressedMatrix<int,columnMajor> MatrixType; typedef blaze::Row<MatrixType> RowType; MatrixType A( 64UL, 32UL ); // ... Resizing and initialization // Creating a reference to the 31st row of a column-major matrix A RowType row1 = row( A, 1UL ); for( RowType::Iterator it=row1.begin(); it!=row1.end(); ++it ) { // ... } \endcode // However, please note that creating a row view on a matrix stored in a column-major fashion // can result in a considerable performance decrease in comparison to a view on a matrix with // a fitting storage orientation. This is due to the non-contiguous storage of the matrix // elements. Therefore care has to be taken in the choice of the most suitable storage order: \code // Setup of two column-major matrices CompressedMatrix<double,columnMajor> A( 128UL, 128UL ); CompressedMatrix<double,columnMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th row of the multiplication between A and B ... CompressedVector<double,rowVector> x = row( A * B, 15UL ); // ... is essentially the same as the following computation, which multiplies // the 15th row of the column-major matrix A with B. 
CompressedVector<double,rowVector> x = row( A, 15UL ) * B; \endcode // Although \b Blaze performs the resulting vector/matrix multiplication as efficiently as possible // using a row-major storage order for matrix A would result in a more efficient evaluation. // // \n Previous: \ref views_submatrices &nbsp; &nbsp; Next: \ref views_columns */ //************************************************************************************************* //**Columns**************************************************************************************** /*!\page views_columns Columns // // \tableofcontents // // // Just as rows provide a view on a specific row of a matrix, columns provide views on a specific // column of a dense or sparse matrix. As such, columns act as a reference to a specific column. // This reference is valid an can be used in every way any other column vector can be used as long // as the matrix containing the column is not resized or entirely destroyed. Changes made to the // elements (e.g. modifying values, inserting or erasing elements) are immediately visible in the // matrix and changes made via the matrix are immediately visible in the column. // // // \n \section views_columns_class The Column Class Template // <hr> // // The blaze::Column class template represents a reference to a specific column of a dense or // sparse matrix primitive. It can be included via the header file \code #include <blaze/math/Column.h> \endcode // The type of the matrix is specified via template parameter: \code template< typename MT > class Column; \endcode // \c MT specifies the type of the matrix primitive. Column can be used with every matrix // primitive, but does not work with any matrix expression type. // // // \n \section views_colums_setup Setup of Columns // <hr> // // Similar to the setup of a row, a reference to a dense or sparse column can be created very // conveniently via the \c column() function. 
This reference can be treated as any other column // vector, i.e. it can be assigned to, copied from, and be used in arithmetic operations. The // column can either be used as an alias to grant write access to a specific column of a matrix // primitive on the left-hand side of an assignment or to grant read-access to a specific column // of a matrix primitive or expression on the right-hand side of an assignment. The following // two examples demonstrate this for dense and sparse matrices: \code typedef blaze::DynamicVector<double,columnVector> DenseVectorType; typedef blaze::CompressedVector<double,columnVector> SparseVectorType; typedef blaze::DynamicMatrix<double,columnMajor> DenseMatrixType; typedef blaze::CompressedMatrix<double,columnMajor> SparseMatrixType; DenseVectorType x; SparseVectorType y; DenseMatrixType A, B; SparseMatrixType C, D; // ... Resizing and initialization // Setting the 1st column of matrix A to x blaze::Column<DenseMatrixType> col1 = column( A, 1UL ); col1 = x; // Setting the 4th column of matrix B to y column( B, 4UL ) = y; // Setting x to the 2nd column of the result of the matrix multiplication x = column( A * B, 2UL ); // Setting y to the 2nd column of the result of the sparse matrix multiplication y = column( C * D, 2UL ); \endcode // The \c column() function can be used on any dense or sparse matrix, including expressions, as // illustrated by the source code example. However, columns cannot be instantiated for expression // types, but only for matrix primitives, respectively, i.e. for matrix types that offer write // access. // // // \n \section views_columns_common_operations Common Operations // <hr> // // A column view can be used like any other column vector. For instance, the current number of // elements can be obtained via the \c size() function, the current capacity via the \c capacity() // function, and the number of non-zero elements via the \c nonZeros() function. 
However, since // columns are references to specific columns of a matrix, several operations are not possible on // views, such as resizing and swapping. The following example shows this by means of a dense // column view: \code typedef blaze::DynamicMatrix<int,columnMajor> MatrixType; typedef blaze::Column<MatrixType> ColumnType; MatrixType A( 42UL, 42UL ); // ... Resizing and initialization // Creating a reference to the 2nd column of matrix A ColumnType col2 = column( A, 2UL ); col2.size(); // Returns the number of elements in the column col2.capacity(); // Returns the capacity of the column col2.nonZeros(); // Returns the number of non-zero elements contained in the column col2.resize( 84UL ); // Compilation error: Cannot resize a single column of a matrix ColumnType col3 = column( A, 3UL ); swap( col2, col3 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_columns_element_access Element Access // <hr> // // The elements of the column can be directly accessed with the subscript operator. The numbering // of the column elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of rows of the referenced matrix. Alternatively, the elements of // a column can be traversed via iterators. Just as with vectors, in case of non-const columns, // \c begin() and \c end() return an Iterator, which allows a manipulation of the non-zero // value, in case of a constant column a ConstIterator is returned: \code typedef blaze::DynamicMatrix<int,columnMajor> MatrixType; typedef blaze::Column<MatrixType> ColumnType; MatrixType A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st column of matrix A ColumnType col31 = column( A, 31UL ); for( ColumnType::Iterator it=col31.begin(); it!=col31.end(); ++it ) { *it = ...; // OK; Write access to the dense column value ... = *it; // OK: Read access to the dense column value. 
} for( ColumnType::ConstIterator it=col31.begin(); it!=col31.end(); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the dense column value. } \endcode \code typedef blaze::CompressedMatrix<int,columnMajor> MatrixType; typedef blaze::Column<MatrixType> ColumnType; MatrixType A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st column of matrix A ColumnType col31 = column( A, 31UL ); for( ColumnType::Iterator it=col31.begin(); it!=col31.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } for( ColumnType::Iterator it=col31.begin(); it!=col31.end(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_columns_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse column can be done by several alternative functions. // The following example demonstrates all options: \code typedef blaze::CompressedMatrix<double,blaze::columnMajor> MatrixType; MatrixType A( 100UL, 10UL ); // Non-initialized 10x100 matrix typedef blaze::Column<MatrixType> ColumnType; ColumnType col0( column( A, 0UL ) ); // Reference to the 0th column of A // The subscript operator provides access to all possible elements of the sparse column, // including the zero elements. 
In case the subscript operator is used to access an element // that is currently not stored in the sparse column, the element is inserted into the column. col0[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the column it is inserted into the column, if it is already contained // in the column its value is modified. col0.set( 45UL, -1.2 ); // An alternative for inserting elements into the column is the insert() function. However, // it inserts the element only in case the element is not already contained in the column. col0.insert( 50UL, 3.7 ); // A very efficient way to add new elements to a sparse column is the append() function. // Note that append() requires that the appended element's index is strictly larger than // the currently largest non-zero index of the column and that the column's capacity is // large enough to hold the new element. col0.reserve( 10UL ); col0.append( 51UL, -2.1 ); \endcode // \n \section views_columns_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse columns can be used in all arithmetic operations that any other dense or // sparse column vector can be used in. The following example gives an impression of the use of // dense columns within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) 
can be performed on all possible combinations of dense and sparse columns with // fitting element types: \code blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b; blaze::CompressedVector<double,blaze::columnVector> c( 2UL ); c[1] = 3.0; typedef blaze::DynamicMatrix<double,blaze::columnMajor> MatrixType; MatrixType A( 2UL, 4UL ); // Non-initialized 2x4 matrix typedef blaze::Column<DenseMatrix> ColumnType; ColumnType col0( column( A, 0UL ) ); // Reference to the 0th column of A col0[0] = 0.0; // Manual initialization of the 0th column of A col0[1] = 0.0; column( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st column of A column( A, 2UL ) = a; // Dense vector initialization of the 2nd column of A column( A, 3UL ) = c; // Sparse vector initialization of the 3rd column of A b = col0 + a; // Dense vector/dense vector addition b = c + column( A, 1UL ); // Sparse vector/dense vector addition b = col0 * column( A, 2UL ); // Component-wise vector multiplication column( A, 1UL ) *= 2.0; // In-place scaling of the 1st column b = column( A, 1UL ) * 2.0; // Scaling of the 1st column b = 2.0 * column( A, 1UL ); // Scaling of the 1st column column( A, 2UL ) += a; // Addition assignment column( A, 2UL ) -= c; // Subtraction assignment column( A, 2UL ) *= column( A, 0UL ); // Multiplication assignment double scalar = trans( c ) * column( A, 1UL ); // Scalar/dot/inner product between two vectors A = column( A, 1UL ) * trans( c ); // Outer product between two vectors \endcode // \n \section views_columns_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order // <hr> // // Especially noteworthy is that column views can be created for both row-major and column-major // matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly // and the interface of a column-major matrix only allows to traverse a column, via views it is // possible to traverse a row of a column-major matrix or a column of a row-major matrix. 
For // instance: \code typedef blaze::CompressedMatrix<int,rowMajor> MatrixType; typedef blaze::Column<MatrixType> ColumnType; MatrixType A( 64UL, 32UL ); // ... Resizing and initialization // Creating a reference to the 31st column of a row-major matrix A ColumnType col1 = column( A, 1UL ); for( ColumnType::Iterator it=col1.begin(); it!=col1.end(); ++it ) { // ... } \endcode // However, please note that creating a column view on a matrix stored in a row-major fashion // can result in a considerable performance decrease in comparison to a view on a matrix with // a fitting storage orientation. This is due to the non-contiguous storage of the matrix // elements. Therefore care has to be taken in the choice of the most suitable storage order: \code // Setup of two row-major matrices CompressedMatrix<double,rowMajor> A( 128UL, 128UL ); CompressedMatrix<double,rowMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th column of the multiplication between A and B ... CompressedVector<double,columnVector> x = column( A * B, 15UL ); // ... is essentially the same as the following computation, which multiplies // the 15th column of the row-major matrix B with A. CompressedVector<double,columnVector> x = A * column( B, 15UL ); \endcode // Although \b Blaze performs the resulting matrix/vector multiplication as efficiently as possible // using a column-major storage order for matrix B would result in a more efficient evaluation. 
// // \n Previous: \ref views_rows &nbsp; &nbsp; Next: \ref arithmetic_operations */ //************************************************************************************************* //**Arithmetic Operations************************************************************************** /*!\page arithmetic_operations Arithmetic Operations // // \tableofcontents // // // \b Blaze provides the following arithmetic operations for vectors and matrices: // // <ul> // <li> \ref addition </li> // <li> \ref subtraction </li> // <li> \ref scalar_multiplication </li> // <li> \ref vector_vector_multiplication // <ul> // <li> \ref componentwise_multiplication </li> // <li> \ref inner_product </li> // <li> \ref outer_product </li> // <li> \ref cross_product </li> // </ul> // </li> // <li> \ref vector_vector_division </li> // <li> \ref matrix_vector_multiplication </li> // <li> \ref matrix_matrix_multiplication </li> // </ul> // // \n Previous: \ref views_columns &nbsp; &nbsp; Next: \ref addition */ //************************************************************************************************* //**Addition*************************************************************************************** /*!\page addition Addition // // The addition of vectors and matrices is as intuitive as the addition of scalar values. For both // the vector addition as well as the matrix addition the addition operator can be used. It even // enables the addition of dense and sparse vectors as well as the addition of dense and sparse // matrices: \code blaze::DynamicVector<int> v1( 5UL ), v3; blaze::CompressedVector<float> v2( 5UL ); // ... Initializing the vectors v3 = v1 + v2; // Addition of a two column vectors of different data type \endcode \code blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL ); blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3; // ... 
Initializing the matrices M3 = M1 + M2; // Addition of a row-major and a column-major matrix of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. Also note that in case of vectors it is only possible to // add vectors with the same transpose flag: \code blaze::DynamicVector<int,columnVector> v1( 5UL ); blaze::CompressedVector<float,rowVector> v2( 5UL ); v1 + v2; // Compilation error: Cannot add a column vector and a row vector v1 + trans( v2 ); // OK: Addition of two column vectors \endcode // In case of matrices, however, it is possible to add row-major and column-major matrices. Note // however that in favor of performance the addition of two matrices with the same storage order // is favorable. The same argument holds for the element type: In case two vectors or matrices // with the same element type are added, the performance can be much higher due to vectorization // of the operation. \code blaze::DynamicVector<double>v1( 100UL ), v2( 100UL ), v3; // ... Initialization of the vectors v3 = v1 + v2; // Vectorized addition of two double precision vectors \endcode \code blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; // ... Initialization of the matrices M3 = M1 + M2; // Vectorized addition of two row-major, single precision dense matrices \endcode // \n Previous: \ref arithmetic_operations &nbsp; &nbsp; Next: \ref subtraction */ //************************************************************************************************* //**Subtraction************************************************************************************ /*!\page subtraction Subtraction // // The subtraction of vectors and matrices works exactly as intuitive as the addition, but with // the subtraction operator. For both the vector subtraction as well as the matrix subtraction // the subtraction operator can be used. 
It also enables the subtraction of dense and sparse // vectors as well as the subtraction of dense and sparse matrices: \code blaze::DynamicVector<int> v1( 5UL ), v3; blaze::CompressedVector<float> v2( 5UL ); // ... Initializing the vectors v3 = v1 - v2; // Subtraction of a two column vectors of different data type blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL ); blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3; // ... Initializing the matrices M3 = M1 - M2; // Subtraction of a row-major and a column-major matrix of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. Also note that in case of vectors it is only possible to // subtract vectors with the same transpose flag: \code blaze::DynamicVector<int,columnVector> v1( 5UL ); blaze::CompressedVector<float,rowVector> v2( 5UL ); v1 - v2; // Compilation error: Cannot subtract a row vector from a column vector v1 - trans( v2 ); // OK: Subtraction of two column vectors \endcode // In case of matrices, however, it is possible to subtract row-major and column-major matrices. // Note however that in favor of performance the subtraction of two matrices with the same storage // order is favorable. The same argument holds for the element type: In case two vectors or matrices // with the same element type are added, the performance can be much higher due to vectorization // of the operation. \code blaze::DynamicVector<double>v1( 100UL ), v2( 100UL ), v3; // ... Initialization of the vectors v3 = v1 - v2; // Vectorized subtraction of two double precision vectors blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; // ... 
Initialization of the matrices M3 = M1 - M2; // Vectorized subtraction of two row-major, single precision dense matrices \endcode // \n Previous: \ref addition &nbsp; &nbsp; Next: \ref scalar_multiplication */ //************************************************************************************************* //**Scalar Multiplication************************************************************************** /*!\page scalar_multiplication Scalar Multiplication // // The scalar multiplication is the multiplication of a scalar value with a vector or a matrix. // In \b Blaze it is possible to use all built-in/fundamental data types except bool as scalar // values. Additionally, it is possible to use std::complex values with the same built-in data // types as element type. \code blaze::StaticVector<int,3UL> v1{ 1, 2, 3 }; blaze::DynamicVector<double> v2 = v1 * 1.2; blaze::CompressedVector<float> v3 = -0.3F * v1; \endcode \code blaze::StaticMatrix<int,3UL,2UL> M1{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; blaze::DynamicMatrix<double> M2 = M1 * 1.2; blaze::CompressedMatrix<float> M3 = -0.3F * M1; \endcode // Vectors and matrices cannot be used for as scalar value for scalar multiplications (see the // following example). 
However, each vector and matrix provides the \c scale() function, which // can be used to scale a vector or matrix element-wise with arbitrary scalar data types: \code blaze::CompressedMatrix< blaze::StaticMatrix<int,3UL,3UL> > M1; blaze::StaticMatrix<int,3UL,3UL> scalar; M1 * scalar; // No scalar multiplication, but matrix/matrix multiplication M1.scale( scalar ); // Scalar multiplication \endcode // \n Previous: \ref subtraction &nbsp; &nbsp; Next: \ref componentwise_multiplication */ //************************************************************************************************* //**Vector/Vector Multiplication******************************************************************* /*!\page vector_vector_multiplication Vector/Vector Multiplication // // \n \section componentwise_multiplication Componentwise Multiplication // <hr> // // Multiplying two vectors with the same transpose flag (i.e. either blaze::columnVector or // blaze::rowVector) via the multiplication operator results in a componentwise multiplication // of the two vectors: \code using blaze::DynamicVector; using blaze::CompressedVector; CompressedVector<int,columnVector> v1( 17UL ); DynamicVector<int,columnVector> v2( 17UL ); StaticVector<double,10UL,rowVector> v3; DynamicVector<double,rowVector> v4( 10UL ); // ... Initialization of the vectors CompressedVector<int,columnVector> v5( v1 * v2 ); // Componentwise multiplication of a sparse and // a dense column vector. The result is a sparse // column vector. DynamicVector<double,rowVector> v6( v3 * v4 ); // Componentwise multiplication of two dense row // vectors. The result is a dense row vector. 
\endcode // \n \section inner_product Inner Product / Scalar Product / Dot Product // <hr> // // The multiplication between a row vector and a column vector results in an inner product between // the two vectors: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 }; int result = v1 * v2; // Results in the value 15 \endcode // The \c trans() function can be used to transpose a vector as necessary: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; int result = v1 * trans( v2 ); // Also results in the value 15 \endcode // Alternatively, either the \c inner() function, the \c dot() function or the comma operator can // be used for any combination of vectors (row or column vectors) to perform an inner product: \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; // All alternatives for the inner product between a column vector and a row vector int result1 = trans( v1 ) * trans( v2 ); int result2 = inner( v1, v2 ); int result3 = dot( v1, v2 ); int result4 = (v1,v2); \endcode // When using the comma operator, please note the brackets embracing the inner product expression. // Due to the low precedence of the comma operator (lower even than the assignment operator) these // brackets are strictly required for a correct evaluation of the inner product. 
// // // \n \section outer_product Outer Product // <hr> // // The multiplication between a column vector and a row vector results in the outer product of // the two vectors: \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2 }; StaticMatrix<int,3UL,3UL> M1 = v1 * v2; \endcode // The \c trans() function can be used to transpose a vector as necessary: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; int result = trans( v1 ) * v2; \endcode // Alternatively, the \c outer() function can be used for any combination of vectors (row or column // vectors) to perform an outer product: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; StaticMatrix<int,3UL,3UL> M1 = outer( v1, v2 ); // Outer product between two row vectors \endcode // \n \section cross_product Cross Product // <hr> // // Two vectors with the same transpose flag can be multiplied via the cross product. The cross // product between two vectors \f$ a \f$ and \f$ b \f$ is defined as \f[ \left(\begin{array}{*{1}{c}} c_0 \\ c_1 \\ c_2 \\ \end{array}\right) = \left(\begin{array}{*{1}{c}} a_1 b_2 - a_2 b_1 \\ a_2 b_0 - a_0 b_2 \\ a_0 b_1 - a_1 b_0 \\ \end{array}\right). \f] // Due to the absence of a \f$ \times \f$ operator in the C++ language, the cross product is // realized via the \c cross() function. Alternatively, the modulo operator (i.e. \c operator%) // can be used in case infix notation is required: \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 }; blaze::StaticVector<int,3UL,columnVector> v3( cross( v1, v2 ) ); blaze::StaticVector<int,3UL,columnVector> v4( v1 % v2 ); \endcode // Please note that the cross product is restricted to three dimensional (dense and sparse) // column vectors. 
// // \n Previous: \ref scalar_multiplication &nbsp; &nbsp; Next: \ref vector_vector_division */ //************************************************************************************************* //**Vector/Vector Division************************************************************************* /*!\page vector_vector_division Vector/Vector Division // // \n \section componentwise_division Componentwise Division // <hr> // // Dividing a vector by a dense vector with the same transpose flag (i.e. either blaze::columnVector // or blaze::rowVector) via the division operator results in a componentwise division: \code using blaze::DynamicVector; using blaze::CompressedVector; CompressedVector<int,columnVector> v1( 17UL ); DynamicVector<int,columnVector> v2( 17UL ); StaticVector<double,10UL,rowVector> v3; DynamicVector<double,rowVector> v4( 10UL ); // ... Initialization of the vectors CompressedVector<int,columnVector> v5( v1 / v2 ); // Componentwise division of a sparse and a // dense column vector. The result is a sparse // column vector. DynamicVector<double,rowVector> v6( v3 / v4 ); // Componentwise division of two dense row // vectors. The result is a dense row vector. \endcode // Note that all values of the divisor must be non-zero and that no checks are performed to assert // this precondition! // // \n Previous: \ref vector_vector_multiplication &nbsp; &nbsp; Next: \ref matrix_vector_multiplication */ //************************************************************************************************* //**Matrix/Vector Multiplication******************************************************************* /*!\page matrix_vector_multiplication Matrix/Vector Multiplication // // In \b Blaze matrix/vector multiplications can be as intuitively formulated as in mathematical // textbooks. 
Just as in textbooks there are two different multiplications between a matrix and // a vector: a matrix/column vector multiplication and a row vector/matrix multiplication: \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::DynamicMatrix; DynamicMatrix<int> M1( 39UL, 12UL ); StaticVector<int,12UL,columnVector> v1; // ... Initialization of the matrix and the vector DynamicVector<int,columnVector> v2 = M1 * v1; // Matrix/column vector multiplication DynamicVector<int,rowVector> v3 = trans( v1 ) * M1; // Row vector/matrix multiplication \endcode // Note that the storage order of the matrix poses no restrictions on the operation. Also note, // that the highest performance for a multiplication between a dense matrix and a dense vector can // be achieved if both the matrix and the vector have the same scalar element type. // // \n Previous: \ref vector_vector_division &nbsp; &nbsp; Next: \ref matrix_matrix_multiplication */ //************************************************************************************************* //**Matrix/Matrix Multiplication******************************************************************* /*!\page matrix_matrix_multiplication Matrix/Matrix Multiplication // // The matrix/matrix multiplication can be formulated exactly as in mathematical textbooks: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; DynamicMatrix<double> M1( 45UL, 85UL ); CompressedMatrix<float> M2( 85UL, 37UL ); // ... Initialization of the matrices DynamicMatrix<double> M3 = M1 * M2; \endcode // The storage order of the two matrices poses no restrictions on the operation, all variations // are possible. Note however that the highest performance for a multiplication between two dense // matrices can be expected for two matrices with the same scalar element type. 
// // \n Previous: \ref matrix_vector_multiplication &nbsp; &nbsp; Next: \ref custom_operations */ //************************************************************************************************* //**Custom Operations****************************************************************************** /*!\page custom_operations Custom Operations // // In addition to the provided operations on vectors and matrices it is possible to define custom // operations. For this purpose, \b Blaze provides the \c forEach() function, which allows to pass // the required operation via functor or lambda: \code blaze::DynamicMatrix<double> A, B; B = forEach( A, []( double d ){ return std::sqrt( d ); } ); \endcode // This example demonstrates the most convenient way of defining a custom operation by passing a // lambda to the \c forEach() function. The lambda is executed on each single element of a dense // vector or matrix or each non-zero element of a sparse vector or matrix. // // Alternatively, it is possible to pass a custom functor: \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } }; B = forEach( A, Sqrt() ); \endcode // In order for the functor to work in a call to \c forEach() it must define a function call // operator, which accepts arguments of the type of the according vector or matrix elements. // // Although the operation is automatically parallelized depending on the size of the vector or // matrix, no automatic vectorization is possible. In order to enable vectorization, a \c load() // function can be added to the functor, which handles the vectorized computation. 
Depending on // the data type this function is passed one of the following \b Blaze SIMD data types: // // <ul> // <li>SIMD data types for fundamental data types // <ul> // <li>\c blaze::SIMDint8: Packed SIMD type for 8-bit signed integral data types</li> // <li>\c blaze::SIMDuint8: Packed SIMD type for 8-bit unsigned integral data types</li> // <li>\c blaze::SIMDint16: Packed SIMD type for 16-bit signed integral data types</li> // <li>\c blaze::SIMDuint16: Packed SIMD type for 16-bit unsigned integral data types</li> // <li>\c blaze::SIMDint32: Packed SIMD type for 32-bit signed integral data types</li> // <li>\c blaze::SIMDuint32: Packed SIMD type for 32-bit unsigned integral data types</li> // <li>\c blaze::SIMDint64: Packed SIMD type for 64-bit signed integral data types</li> // <li>\c blaze::SIMDuint64: Packed SIMD type for 64-bit unsigned integral data types</li> // <li>\c blaze::SIMDfloat: Packed SIMD type for single precision floating point data</li> // <li>\c blaze::SIMDdouble: Packed SIMD type for double precision floating point data</li> // </ul> // </li> // <li>SIMD data types for complex data types // <ul> // <li>\c blaze::cint8: Packed SIMD type for complex 8-bit signed integral data types</li> // <li>\c blaze::cuint8: Packed SIMD type for complex 8-bit unsigned integral data types</li> // <li>\c blaze::cint16: Packed SIMD type for complex 16-bit signed integral data types</li> // <li>\c blaze::cuint16: Packed SIMD type for complex 16-bit unsigned integral data types</li> // <li>\c blaze::cint32: Packed SIMD type for complex 32-bit signed integral data types</li> // <li>\c blaze::cuint32: Packed SIMD type for complex 32-bit unsigned integral data types</li> // <li>\c blaze::cint64: Packed SIMD type for complex 64-bit signed integral data types</li> // <li>\c blaze::cuint64: Packed SIMD type for complex 64-bit unsigned integral data types</li> // <li>\c blaze::cfloat: Packed SIMD type for complex single precision floating point data</li> // <li>\c 
blaze::cdouble: Packed SIMD type for complex double precision floating point data</li> // </ul> // </li> // </ul> // // All SIMD types provide the \c value data member for a direct access to the underlying intrinsic // data element. In the following example, this intrinsic element is passed to the AVX function // \c _mm256_sqrt_pd(): \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } simd_double_t load( simd_double_t a ) const { return _mm256_sqrt_pd( a.value ); } }; \endcode // In this example, whenever vectorization is generally applicable, the \c load() function is // called instead of the function call operator for as long as the number of remaining elements // is larger-or-equal to the width of the packed SIMD type. In all other cases (which also // includes peel-off and remainder loops) the scalar operation is used. // // Please note that this example has two drawbacks: First, it will only compile in case the // intrinsic \c _mm256_sqrt_pd() function is available (i.e. when AVX is active). Second, the // availability of AVX is not taken into account. The first drawback can be alleviated by making // the \c load() function a function template. The second drawback can be dealt with by adding a // \c simdEnabled() function template to the functor: \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } template< typename T > T load( T a ) const { return _mm256_sqrt_pd( a.value ); } template< typename T > static constexpr bool simdEnabled() { #if defined(__AVX__) return true; #else return false; #endif } }; \endcode // The \c simdEnabled() function must be a \c static, \c constexpr function and must return whether // or not vectorization is available for the given data type \c T. In case the function returns // \c true, the \c load() function is used for a vectorized evaluation, in case the function // returns \c false, \c load() is not called. 
// // Note that this is a simplified example that is only working when used for dense vectors and // matrices with double precision floating point elements. The following code shows the complete // implementation of the according functor that is used within the \b Blaze library. The \b Blaze // \c Sqrt functor is working for all data types that are providing a square root operation: \code namespace blaze { struct Sqrt { template< typename T > BLAZE_ALWAYS_INLINE auto operator()( const T& a ) const { return sqrt( a ); } template< typename T > static constexpr bool simdEnabled() { return HasSIMDSqrt<T>::value; } template< typename T > BLAZE_ALWAYS_INLINE auto load( const T& a ) const { BLAZE_CONSTRAINT_MUST_BE_SIMD_TYPE( T ); return sqrt( a ); } }; } // namespace blaze \endcode // For more information on the available \b Blaze SIMD data types and functions, please see the // SIMD module in the complete \b Blaze documentation. // // \n Previous: \ref matrix_matrix_multiplication &nbsp; &nbsp; Next: \ref shared_memory_parallelization */ //************************************************************************************************* //**Shared Memory Parallelization****************************************************************** /*!\page shared_memory_parallelization Shared Memory Parallelization // // One of the main motivations of the \b Blaze 1.x releases was to achieve maximum performance // on a single CPU core for all possible operations. However, today's CPUs are not single core // anymore, but provide several (homogeneous or heterogeneous) compute cores. In order to fully // exploit the performance potential of a multicore CPU, computations have to be parallelized // across all available cores of a CPU. 
For this purpose, \b Blaze provides three different // shared memory parallelization techniques: // // - \ref openmp_parallelization // - \ref cpp_threads_parallelization // - \ref boost_threads_parallelization // // In addition, \b Blaze provides means to enforce the serial execution of specific operations: // // - \ref serial_execution // // \n Previous: \ref custom_operations &nbsp; &nbsp; Next: \ref openmp_parallelization */ //************************************************************************************************* //**OpenMP Parallelization************************************************************************* /*!\page openmp_parallelization OpenMP Parallelization // // \tableofcontents // // // \n \section openmp_setup OpenMP Setup // <hr> // // To enable the OpenMP-based parallelization, all that needs to be done is to explicitly specify // the use of OpenMP on the command line: \code -fopenmp // GNU C++ compiler -openmp // Intel C++ compiler /openmp // Visual Studio \endcode // This simple action will cause the \b Blaze library to automatically try to run all operations // in parallel with the specified number of threads. // // As common for OpenMP, the number of threads can be specified either via an environment variable \code export OMP_NUM_THREADS=4 // Unix systems set OMP_NUM_THREADS=4 // Windows systems \endcode // or via an explicit call to the \c omp_set_num_threads() function: \code omp_set_num_threads( 4 ); \endcode // Alternatively, the number of threads can also be specified via the \c setNumThreads() function // provided by the \b Blaze library: \code blaze::setNumThreads( 4 ); \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is in YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. 
// // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of OpenMP, the function returns the maximum number of threads OpenMP will use // within a parallel region and is therefore equivalent to the \c omp_get_max_threads() function. // // // \n \section openmp_configuration OpenMP Configuration // <hr> // // Note that \b Blaze is not unconditionally running an operation in parallel. In case \b Blaze // deems the parallel execution as counterproductive for the overall performance, the operation // is executed serially. One of the main reasons for not executing an operation in parallel is // the size of the operands. For instance, a vector addition is only executed in parallel if the // size of both vector operands exceeds a certain threshold. Otherwise, the performance could // seriously decrease due to the overhead caused by the thread setup. However, in order to be // able to adjust the \b Blaze library to a specific system, it is possible to configure these // thresholds manually. All shared memory thresholds are contained within the configuration file // <tt>./blaze/config/Thresholds.h</tt>. // // Please note that these thresholds are highly sensitive to the used system architecture and // the shared memory parallelization technique (see also \ref cpp_threads_parallelization and // \ref boost_threads_parallelization). Therefore the default values cannot guarantee maximum // performance for all possible situations and configurations. They merely provide a reasonable // standard for the current CPU generation. // // // \n \section openmp_first_touch First Touch Policy // <hr> // // So far the \b Blaze library does not (yet) automatically initialize dynamic memory according // to the first touch principle. 
Consider for instance the following vector triad example: \code using blaze::columnVector; const size_t N( 1000000UL ); blaze::DynamicVector<double,columnVector> a( N ), b( N ), c( N ), d( N ); // Initialization of the vectors b, c, and d for( size_t i=0UL; i<N; ++i ) { b[i] = rand<double>(); c[i] = rand<double>(); d[i] = rand<double>(); } // Performing a vector triad a = b + c * d; \endcode // If this code, which is prototypical for many OpenMP applications that have not been optimized // for ccNUMA architectures, is run across several locality domains (LD), it will not scale // beyond the maximum performance achievable on a single LD if the working set does not fit into // the cache. This is because the initialization loop is executed by a single thread, writing to // \c b, \c c, and \c d for the first time. Hence, all memory pages belonging to those arrays will // be mapped into a single LD. // // As mentioned above, this problem can be solved by performing vector initialization in parallel: \code // ... // Initialization of the vectors b, c, and d #pragma omp parallel for for( size_t i=0UL; i<N; ++i ) { b[i] = rand<double>(); c[i] = rand<double>(); d[i] = rand<double>(); } // ... \endcode // This simple modification makes a huge difference on ccNUMA in memory-bound situations (as for // instance in all BLAS level 1 operations and partially BLAS level 2 operations). Therefore, in // order to achieve the maximum possible performance, it is imperative to initialize the memory // according to the later use of the data structures. // // // \n \section openmp_limitations Limitations of the OpenMP Parallelization // <hr> // // There are a few important limitations to the current \b Blaze OpenMP parallelization. The first // one involves the explicit use of an OpenMP parallel region (see \ref openmp_parallel), the // other one the OpenMP \c sections directive (see \ref openmp_sections). 
// // // \n \subsection openmp_parallel The Parallel Directive // // In OpenMP threads are explicitly spawned via an OpenMP parallel directive: \code // Serial region, executed by a single thread #pragma omp parallel { // Parallel region, executed by the specified number of threads } // Serial region, executed by a single thread \endcode // Conceptually, the specified number of threads (see \ref openmp_setup) is created every time a // parallel directive is encountered. Therefore, from a performance point of view, it seems to be // beneficial to use a single OpenMP parallel directive for several operations: \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, B; #pragma omp parallel { y1 = A * x; y2 = B * x; } \endcode // Unfortunately, this optimization approach is not allowed within the \b Blaze library. More // explicitly, it is not allowed to put an operation into a parallel region. The reason is that // the entire code contained within a parallel region is executed by all threads. Although this // appears to just comprise the contained computations, a computation (or more specifically the // assignment of an expression to a vector or matrix) can contain additional logic that must not // be handled by multiple threads (as for instance memory allocations, setup of temporaries, etc.). // Therefore it is not possible to manually start a parallel region for several operations, but // \b Blaze will spawn threads automatically, depending on the specifics of the operation at hand // and the given operands. // // \n \subsection openmp_sections The Sections Directive // // OpenMP provides several work-sharing constructs to distribute work among threads. One of these // constructs is the \c sections directive: \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, B; // ... 
Resizing and initialization #pragma omp sections { #pragma omp section y1 = A * x; #pragma omp section y2 = B * x; } \endcode // In this example, two threads are used to compute two distinct matrix/vector multiplications // concurrently. Thereby each of the \c sections is executed by exactly one thread. // // Unfortunately \b Blaze does not support concurrent parallel computations and therefore this // approach does not work with any of the \b Blaze parallelization techniques. All techniques // (including the C++11 and Boost thread parallelizations; see \ref cpp_threads_parallelization // and \ref boost_threads_parallelization) are optimized for the parallel computation of an // operation within a single thread of execution. This means that \b Blaze tries to use all // available threads to compute the result of a single operation as efficiently as possible. // Therefore, for this special case, it is advisable to disable all \b Blaze parallelizations // and to let \b Blaze compute all operations within a \c sections directive in serial. This can // be done by either completely disabling the \b Blaze parallelization (see \ref serial_execution) // or by selectively serializing all operations within a \c sections directive via the \c serial() // function: \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization #pragma omp sections { #pragma omp section y1 = serial( A * x ); #pragma omp section y2 = serial( B * x ); } \endcode // Please note that the use of the \c BLAZE_SERIAL_SECTION (see also \ref serial_execution) does // NOT work in this context! 
// // \n Previous: \ref shared_memory_parallelization &nbsp; &nbsp; Next: \ref cpp_threads_parallelization */ //************************************************************************************************* //**C++11 Thread Parallelization******************************************************************* /*!\page cpp_threads_parallelization C++11 Thread Parallelization // // \tableofcontents // // // In addition to the OpenMP-based shared memory parallelization, starting with \b Blaze 2.1, // \b Blaze also provides a shared memory parallelization based on C++11 threads. // // // \n \section cpp_threads_setup C++11 Thread Setup // <hr> // // In order to enable the C++11 thread-based parallelization, first the according C++11-specific // compiler flags have to be used and second the \c BLAZE_USE_CPP_THREADS command line argument // has to be explicitly specified. For instance, in case of the GNU C++ and Clang compilers the // compiler flags have to be extended by \code ... -std=c++11 -DBLAZE_USE_CPP_THREADS ... \endcode // This simple action will cause the \b Blaze library to automatically try to run all operations // in parallel with the specified number of C++11 threads. Note that in case both OpenMP and C++11 // threads are enabled on the command line, the OpenMP-based parallelization has priority and // is preferred. // // The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS \code export BLAZE_NUM_THREADS=4 // Unix systems set BLAZE_NUM_THREADS=4 // Windows systems \endcode // or alternatively via the \c setNumThreads() function provided by the \b Blaze library: \code blaze::setNumThreads( 4 ); \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is in YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. 
// // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of C++11 threads, the function will return the previously specified number of // threads. // // // \n \section cpp_threads_configuration C++11 Thread Configuration // <hr> // // As in case of the OpenMP-based parallelization \b Blaze is not unconditionally running an // operation in parallel. In case \b Blaze deems the parallel execution as counterproductive for // the overall performance, the operation is executed serially. One of the main reasons for not // executing an operation in parallel is the size of the operands. For instance, a vector addition // is only executed in parallel if the size of both vector operands exceeds a certain threshold. // Otherwise, the performance could seriously decrease due to the overhead caused by the thread // setup. However, in order to be able to adjust the \b Blaze library to a specific system, it // is possible to configure these thresholds manually. All thresholds are contained within the // configuration file <tt>./blaze/config/Thresholds.h</tt>. // // Please note that these thresholds are highly sensitive to the used system architecture and // the shared memory parallelization technique. Therefore the default values cannot guarantee // maximum performance for all possible situations and configurations. They merely provide a // reasonable standard for the current CPU generation. Also note that the provided defaults // have been determined using the OpenMP parallelization and require individual adaption for // the C++11 thread parallelization. 
// // // \n \section cpp_threads_known_issues Known Issues // <hr> // // There is a known issue in Visual Studio 2012 and 2013 that may cause C++11 threads to hang // if their destructor is executed after the \c main() function: // // http://connect.microsoft.com/VisualStudio/feedback/details/747145 // // Unfortunately, the C++11 parallelization of the \b Blaze library is affected from this bug. // In order to circumvent this problem, \b Blaze provides the \c shutDownThreads() function, // which can be used to manually destroy all threads at the end of the \c main() function: \code int main() { // ... Using the C++11 thread parallelization of Blaze shutDownThreads(); } \endcode // Please note that this function may only be used at the end of the \c main() function. After // this function no further computation may be executed! Also note that this function has an // effect for Visual Studio compilers only and doesn't need to be used with any other compiler. // // \n Previous: \ref openmp_parallelization &nbsp; &nbsp; Next: \ref boost_threads_parallelization */ //************************************************************************************************* //**Boost Thread Parallelization******************************************************************* /*!\page boost_threads_parallelization Boost Thread Parallelization // // \tableofcontents // // // The third available shared memory parallelization provided with \b Blaze is based on Boost // threads. // // // \n \section boost_threads_setup Boost Thread Setup // <hr> // // In order to enable the Boost thread-based parallelization, two steps have to be taken: First, // the \c BLAZE_USE_BOOST_THREADS command line argument has to be explicitly specified during // compilation: \code ... -DBLAZE_USE_BOOST_THREADS ... \endcode // Second, the according Boost libraries have to be linked. 
These two simple actions will cause // the \b Blaze library to automatically try to run all operations in parallel with the specified // number of Boost threads. Note that the OpenMP-based and C++11 thread-based parallelizations // have priority, i.e. are preferred in case either is enabled in combination with the Boost // thread parallelization. // // The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS \code export BLAZE_NUM_THREADS=4 // Unix systems set BLAZE_NUM_THREADS=4 // Windows systems \endcode // or alternatively via the \c setNumThreads() function provided by the \b Blaze library: \code blaze::setNumThreads( 4 ); \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is in YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. // // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of Boost threads, the function will return the previously specified number of // threads. // // // \n \section boost_threads_configuration Boost Thread Configuration // <hr> // // As in case of the other shared memory parallelizations \b Blaze is not unconditionally running // an operation in parallel (see \ref openmp_parallelization or \ref cpp_threads_parallelization). // All thresholds related to the Boost thread parallelization are also contained within the // configuration file <tt>./blaze/config/Thresholds.h</tt>. // // Please note that these thresholds are highly sensitive to the used system architecture and // the shared memory parallelization technique. Therefore the default values cannot guarantee // maximum performance for all possible situations and configurations. 
They merely provide a // reasonable standard for the current CPU generation. Also note that the provided defaults // have been determined using the OpenMP parallelization and require individual adaption for // the Boost thread parallelization. // // \n Previous: \ref cpp_threads_parallelization &nbsp; &nbsp; Next: \ref serial_execution */ //************************************************************************************************* //**Serial Execution******************************************************************************* /*!\page serial_execution Serial Execution // // Sometimes it may be necessary to enforce the serial execution of specific operations. For this // purpose, the \b Blaze library offers three possible options: the serialization of a single // expression via the \c serial() function, the serialization of a block of expressions via the // \c BLAZE_SERIAL_SECTION, and the general deactivation of the parallel execution. // // // \n \section serial_execution_serial_expression Option 1: Serialization of a Single Expression // <hr> // // The first option is the serialization of a specific operation via the \c serial() function: \code blaze::DynamicMatrix<double> A, B, C; // ... Resizing and initialization C = serial( A + B ); \endcode // \c serial() enforces the serial evaluation of the enclosed expression. It can be used on any // kind of dense or sparse vector or matrix expression. // // // \n \section serial_execution_serial_section Option 2: Serialization of Multiple Expressions // <hr> // // The second option is the temporary and local enforcement of a serial execution via the // \c BLAZE_SERIAL_SECTION: \code using blaze::rowMajor; using blaze::columnVector; blaze::DynamicMatrix<double,rowMajor> A; blaze::DynamicVector<double,columnVector> b, c, d, x, y, z; // ... Resizing and initialization // Parallel execution // If possible and beneficial for performance the following operation is executed in parallel. 
x = A * b; // Serial execution // All operations executed within the serial section are guaranteed to be executed in // serial (even if a parallel execution would be possible and/or beneficial). BLAZE_SERIAL_SECTION { y = A * c; z = A * d; } // Parallel execution continued // ... \endcode // Within the scope of the \c BLAZE_SERIAL_SECTION, all operations are guaranteed to run in serial. // Outside the scope of the serial section, all operations are run in parallel (if beneficial for // the performance). // // Note that the \c BLAZE_SERIAL_SECTION must only be used within a single thread of execution. // The use of the serial section within several concurrent threads will result in undefined behavior! // // // \n \section serial_execution_deactivate_parallelism Option 3: Deactivation of Parallel Execution // <hr> // // The third option is the general deactivation of the parallel execution (even in case OpenMP is // enabled on the command line). This can be achieved via the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION // switch in the <tt>./blaze/config/SMP.h</tt> configuration file: \code #define BLAZE_USE_SHARED_MEMORY_PARALLELIZATION 1 \endcode // In case the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION switch is set to 0, the shared memory // parallelization is deactivated altogether. // // \n Previous: \ref boost_threads_parallelization &nbsp; &nbsp; Next: \ref serialization */ //************************************************************************************************* //**Serialization********************************************************************************** /*!\page serialization Serialization // // Sometimes it is necessary to store vectors and/or matrices on disk, for instance for storing // results or for sharing specific setups with other people. 
The \b Blaze math serialization // module provides the according functionality to create platform independent, portable, binary // representations of vectors and matrices that can be used to store the \b Blaze data structures // without loss of precision and to reliably transfer them from one machine to another. // // The following two pages explain how to serialize vectors and matrices: // // - \ref vector_serialization // - \ref matrix_serialization // // \n Previous: \ref serial_execution &nbsp; &nbsp; Next: \ref vector_serialization */ //************************************************************************************************* //**Vector Serialization*************************************************************************** /*!\page vector_serialization Vector Serialization // // The following example demonstrates the (de-)serialization of dense and sparse vectors: \code using blaze::columnVector; using blaze::rowVector; // Serialization of both vectors { blaze::StaticVector<double,5UL,rowVector> d; blaze::CompressedVector<int,columnVector> s; // ... Resizing and initialization // Creating an archive that writes into the file "vectors.blaze" blaze::Archive<std::ofstream> archive( "vectors.blaze" ); // Serialization of both vectors into the same archive. Note that d lies before s! archive << d << s; } // Reconstitution of both vectors { blaze::DynamicVector<double,rowVector> d1; blaze::DynamicVector<int,rowVector> d2; // Creating an archive that reads from the file "vectors.blaze" blaze::Archive<std::ifstream> archive( "vectors.blaze" ); // Reconstituting the former d vector into d1. Note that it is possible to reconstitute // the vector into a different kind of vector (StaticVector -> DynamicVector), but that // the type of elements has to be the same. archive >> d1; // Reconstituting the former s vector into d2. 
Note that it is even possible to reconstitute // a sparse vector as a dense vector (also the reverse is possible) and that a column vector // can be reconstituted as row vector (and vice versa). Note however that also in this case // the type of elements is the same! archive >> d2; } \endcode // The (de-)serialization of vectors is not restricted to vectors of built-in data type, but can // also be used for vectors with vector or matrix element type: \code // Serialization { blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec; // ... Resizing and initialization // Creating an archive that writes into the file "vector.blaze" blaze::Archive<std::ofstream> archive( "vector.blaze" ); // Serialization of the vector into the archive archive << vec; } // Deserialization { blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec; // Creating an archive that reads from the file "vector.blaze" blaze::Archive<std::ifstream> archive( "vector.blaze" ); // Reconstitution of the vector from the archive archive >> vec; } \endcode // As the example demonstrates, the vector serialization offers an enormous flexibility. However, // several actions result in errors: // // - vectors cannot be reconstituted as matrices (and vice versa) // - the element type of the serialized and reconstituted vector must match, which means // that on the source and destination platform the general type (signed/unsigned integral // or floating point) and the size of the type must be exactly the same // - when reconstituting a \c StaticVector, its size must match the size of the serialized vector // // In case an error is encountered during (de-)serialization, a \c std::runtime_error is // thrown. 
// // \n Previous: \ref serialization &nbsp; &nbsp; Next: \ref matrix_serialization */ //************************************************************************************************* //**Matrix Serialization*************************************************************************** /*!\page matrix_serialization Matrix Serialization // // The serialization of matrices works in the same manner as the serialization of vectors. The // following example demonstrates the (de-)serialization of dense and sparse matrices: \code using blaze::rowMajor; using blaze::columnMajor; // Serialization of both matrices { blaze::StaticMatrix<double,3UL,5UL,rowMajor> D; blaze::CompressedMatrix<int,columnMajor> S; // ... Resizing and initialization // Creating an archive that writes into the file "matrices.blaze" blaze::Archive<std::ofstream> archive( "matrices.blaze" ); // Serialization of both matrices into the same archive. Note that D lies before S! archive << D << S; } // Reconstitution of both matrices { blaze::DynamicMatrix<double,rowMajor> D1; blaze::DynamicMatrix<int,rowMajor> D2; // Creating an archive that reads from the file "matrices.blaze" blaze::Archive<std::ifstream> archive( "matrices.blaze" ); // Reconstituting the former D matrix into D1. Note that it is possible to reconstitute // the matrix into a different kind of matrix (StaticMatrix -> DynamicMatrix), but that // the type of elements has to be the same. archive >> D1; // Reconstituting the former S matrix into D2. Note that it is even possible to reconstitute // a sparse matrix as a dense matrix (also the reverse is possible) and that a column-major // matrix can be reconstituted as row-major matrix (and vice versa). Note however that also // in this case the type of elements is the same! 
archive >> D2; } \endcode // Note that also in case of matrices it is possible to (de-)serialize matrices with vector or // matrix elements: \code // Serialization { blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat; // ... Resizing and initialization // Creating an archive that writes into the file "matrix.blaze" blaze::Archive<std::ofstream> archive( "matrix.blaze" ); // Serialization of the matrix into the archive archive << mat; } // Deserialization { blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat; // Creating an archive that reads from the file "matrix.blaze" blaze::Archive<std::ifstream> archive( "matrix.blaze" ); // Reconstitution of the matrix from the archive archive >> mat; } \endcode // Note that just as the vector serialization, the matrix serialization is restricted by a // few important rules: // // - matrices cannot be reconstituted as vectors (and vice versa) // - the element type of the serialized and reconstituted matrix must match, which means // that on the source and destination platform the general type (signed/unsigned integral // or floating point) and the size of the type must be exactly the same // - when reconstituting a \c StaticMatrix, the number of rows and columns must match those // of the serialized matrix // // In case an error is encountered during (de-)serialization, a \c std::runtime_error is // thrown. // // \n Previous: \ref vector_serialization &nbsp; &nbsp; Next: \ref blas_functions \n */ //************************************************************************************************* //**BLAS Functions********************************************************************************* /*!\page blas_functions BLAS Functions // // \tableofcontents // // // For vector/vector, matrix/vector and matrix/matrix multiplications with large dense matrices // \b Blaze relies on the efficiency of BLAS libraries. 
For this purpose, \b Blaze implements // several convenient C++ wrapper functions for several BLAS functions. The following sections // give a complete overview of all available BLAS level 1, 2 and 3 functions. // // // \n \section blas_level_1 BLAS Level 1 // <hr> // // \subsection blas_level_1_dotu Dot Product (dotu) // // The following wrapper functions provide a generic interface for the BLAS functions for the // dot product of two dense vectors (\c sdot(), \c ddot(), \c cdotu_sub(), and \c zdotu_sub()): \code namespace blaze { float dotu( int n, const float* x, int incX, const float* y, int incY ); double dotu( int n, const double* x, int incX, const double* y, int incY ); complex<float> dotu( int n, const complex<float>* x, int incX, const complex<float>* y, int incY ); complex<double> dotu( int n, const complex<double>* x, int incX, const complex<double>* y, int incY ); template< typename VT1, bool TF1, typename VT2, bool TF2 > ElementType_<VT1> dotu( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y ); } // namespace blaze \endcode // \subsection blas_level_1_dotc Complex Conjugate Dot Product (dotc) // // The following wrapper functions provide a generic interface for the BLAS functions for the // complex conjugate dot product of two dense vectors (\c sdot(), \c ddot(), \c cdotc_sub(), // and \c zdotc_sub()): \code namespace blaze { float dotc( int n, const float* x, int incX, const float* y, int incY ); double dotc( int n, const double* x, int incX, const double* y, int incY ); complex<float> dotc( int n, const complex<float>* x, int incX, const complex<float>* y, int incY ); complex<double> dotc( int n, const complex<double>* x, int incX, const complex<double>* y, int incY ); template< typename VT1, bool TF1, typename VT2, bool TF2 > ElementType_<VT1> dotc( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y ); } // namespace blaze \endcode // \subsection blas_level_1_axpy Axpy Product (axpy) // // The following wrapper functions 
provide a generic interface for the BLAS functions for the // axpy product of two dense vectors (\c saxpy(), \c daxpy(), \c caxpy(), and \c zaxpy()): \code namespace blaze { void axpy( int n, float alpha, const float* x, int incX, float* y, int incY ); void axpy( int n, double alpha, const double* x, int incX, double* y, int incY ); void axpy( int n, complex<float> alpha, const complex<float>* x, int incX, complex<float>* y, int incY ); void axpy( int n, complex<double> alpha, const complex<double>* x, int incX, complex<double>* y, int incY ); template< typename VT1, bool TF1, typename VT2, bool TF2, typename ST > void axpy( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y, ST alpha ); } // namespace blaze \endcode // \n \section blas_level_2 BLAS Level 2 // <hr> // // \subsection blas_level_2_gemv General Matrix/Vector Multiplication (gemv) // // The following wrapper functions provide a generic interface for the BLAS functions for the // general matrix/vector multiplication (\c sgemv(), \c dgemv(), \c cgemv(), and \c zgemv()): \code namespace blaze { void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, float alpha, const float* A, int lda, const float* x, int incX, float beta, float* y, int incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, double alpha, const double* A, int lda, const double* x, int incX, double beta, double* y, int incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, complex<float> alpha, const complex<float>* A, int lda, const complex<float>* x, int incX, complex<float> beta, complex<float>* y, int incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, complex<double> alpha, const complex<double>* A, int lda, const complex<double>* x, int incX, complex<double> beta, complex<double>* y, int incY ); template< typename VT1, typename MT1, bool SO, typename VT2, typename ST > void gemv( DenseVector<VT1,false>& y, const DenseMatrix<MT1,SO>& A, const 
DenseVector<VT2,false>& x, ST alpha, ST beta ); template< typename VT1, typename VT2, typename MT1, bool SO, typename ST > void gemv( DenseVector<VT1,true>& y, const DenseVector<VT2,true>& x, const DenseMatrix<MT1,SO>& A, ST alpha, ST beta ); } // namespace blaze \endcode // \n \subsection blas_level_2_trmv Triangular Matrix/Vector Multiplication (trmv) // // The following wrapper functions provide a generic interface for the BLAS functions for the // matrix/vector multiplication with a triangular matrix (\c strmv(), \c dtrmv(), \c ctrmv(), // and \c ztrmv()): \code namespace blaze { void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int n, const float* A, int lda, float* x, int incX ); void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int n, const double* A, int lda, double* x, int incX ); void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int n, const complex<float>* A, int lda, complex<float>* x, int incX ); void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int n, const complex<double>* A, int lda, complex<double>* x, int incX ); template< typename VT, typename MT, bool SO > void trmv( DenseVector<VT,false>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo ); template< typename VT, typename MT, bool SO > void trmv( DenseVector<VT,true>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo ); } // namespace blaze \endcode // \n \section blas_level_3 BLAS Level 3 // <hr> // // \subsection blas_level_3_gemm General Matrix/Matrix Multiplication (gemm) // // The following wrapper functions provide a generic interface for the BLAS functions for the // general matrix/matrix multiplication (\c sgemm(), \c dgemm(), \c cgemm(), and \c zgemm()): \code namespace blaze { void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n, int k, float alpha, const float* A, int lda, const float* B, int ldb, 
float beta, float* C, int ldc ); void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n, int k, double alpha, const double* A, int lda, const double* B, int ldb, double beta, double* C, int ldc ); void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n, int k, complex<float> alpha, const complex<float>* A, int lda, const complex<float>* B, int ldb, complex<float> beta, complex<float>* C, int ldc ); void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n, int k, complex<double> alpha, const complex<double>* A, int lda, const complex<double>* B, int ldb, complex<double> beta, complex<double>* C, int ldc ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename MT3, bool SO3, typename ST > void gemm( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, const DenseMatrix<MT3,SO3>& B, ST alpha, ST beta ); } // namespace blaze \endcode // \n \subsection blas_level_3_trmm Triangular Matrix/Matrix Multiplication (trmm) // // The following wrapper functions provide a generic interface for the BLAS functions for the // matrix/matrix multiplication with a triangular matrix (\c strmm(), \c dtrmm(), \c ctrmm(), and // \c ztrmm()): \code namespace blaze { void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, float alpha, const float* A, int lda, float* B, int ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, double alpha, const double* A, int lda, double* B, int ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<float> alpha, const complex<float>* A, int lda, complex<float>* B, int ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<double> alpha, const complex<double>* A, int 
lda, complex<double>* B, int ldb ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST > void trmm( DenseMatrix<MT1,SO1>& B, const DenseMatrix<MT2,SO2>& A, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); } // namespace blaze \endcode // \n \subsection blas_level_3_trsm Triangular System Solver (trsm) // // The following wrapper functions provide a generic interface for the BLAS functions for solving // a triangular system of equations (\c strsm(), \c dtrsm(), \c ctrsm(), and \c ztrsm()): \code namespace blaze { void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, float alpha, const float* A, int lda, float* B, int ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, double alpha, const double* A, int lda, double* B, int ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<float> alpha, const complex<float>* A, int lda, complex<float>* B, int ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<double> alpha, const complex<double>* A, int lda, complex<double>* B, int ldb ); template< typename MT, bool SO, typename VT, bool TF, typename ST > void trsm( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST > void trsm( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); } // namespace blaze \endcode // \n Previous: \ref matrix_serialization &nbsp; &nbsp; Next: \ref lapack_functions \n */ //************************************************************************************************* //**LAPACK Functions******************************************************************************* /*!\page 
lapack_functions LAPACK Functions // // \tableofcontents // // // The \b Blaze library makes extensive use of the LAPACK functionality for various compute tasks // (including the decomposition, inversion and the computation of the determinant of dense matrices). // For this purpose, \b Blaze implements several convenient C++ wrapper functions for all required // LAPACK functions. The following sections give a complete overview of all available LAPACK wrapper // functions. For more details on the individual LAPACK functions see the \b Blaze function // documentation or the LAPACK online documentation browser: // // http://www.netlib.org/lapack/explore-html/ // // \note All functions only work for general, non-adapted matrices with \c float, \c double, // \c complex<float>, or \c complex<double> element type. The attempt to call the function with // adaptors or matrices of any other element type results in a compile time error! // // \note All functions can only be used if the fitting LAPACK library is available and linked to // the final executable. Otherwise a call to this function will result in a linker error. // // \note For performance reasons all functions do only provide the basic exception safety guarantee, // i.e. in case an exception is thrown the given matrix may already have been modified. // // // \n \section lapack_decomposition Matrix Decomposition // <hr> // // The following functions decompose/factorize the given dense matrix. Based on this decomposition // the matrix can be inverted or used to solve a linear system of equations. 
// // // \n \subsection lapack_lu_decomposition LU Decomposition // // The following functions provide an interface for the LAPACK functions \c sgetrf(), \c dgetrf(), // \c cgetrf(), and \c zgetrf(), which compute the LU decomposition for the given general matrix: \code namespace blaze { void getrf( int m, int n, float* A, int lda, int* ipiv, int* info ); void getrf( int m, int n, double* A, int lda, int* ipiv, int* info ); void getrf( int m, int n, complex<float>* A, int lda, int* ipiv, int* info ); void getrf( int m, int n, complex<double>* A, int lda, int* ipiv, int* info ); template< typename MT, bool SO > void getrf( DenseMatrix<MT,SO>& A, int* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = P \cdot L \cdot U, \f]\n // where \c P is a permutation matrix, \c L is a lower unitriangular matrix, and \c U is an upper // triangular matrix. The resulting decomposition is stored within \a A: In case of a column-major // matrix, \c L is stored in the lower part of \a A and \c U is stored in the upper part. The unit // diagonal elements of \c L are not stored. In case \a A is a row-major matrix the result is // transposed. // // \note The LU decomposition will never fail, even for singular matrices. However, in case of a // singular matrix the resulting decomposition cannot be used for a matrix inversion or solving // a linear system of equations. 
// // // \n \subsection lapack_ldlt_decomposition LDLT Decomposition // // The following functions provide an interface for the LAPACK functions \c ssytrf(), \c dsytrf(), // \c csytrf(), and \c zsytrf(), which compute the LDLT (Bunch-Kaufman) decomposition for the given // symmetric indefinite matrix: \code namespace blaze { void sytrf( char uplo, int n, float* A, int lda, int* ipiv, float* work, int lwork, int* info ); void sytrf( char uplo, int n, double* A, int lda, int* ipiv, double* work, int lwork, int* info ); void sytrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, complex<float>* work, int lwork, int* info ); void sytrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void sytrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U D U^{T} \texttt{ (if uplo = 'U'), or } A = L D L^{T} \texttt{ (if uplo = 'L'), } \f] // where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices, // and \c D is symmetric and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting // decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in // the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to // \c 'U' the result is stored in the upper part and the lower part remains untouched. // // \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in // case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or // solving a linear system of equations. 
// // // \n \subsection lapack_ldlh_decomposition LDLH Decomposition // // The following functions provide an interface for the LAPACK functions \c chetrf() and \c zhetrf(), // which compute the LDLH (Bunch-Kaufman) decomposition for the given Hermitian indefinite matrix: \code namespace blaze { void hetrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, complex<float>* work, int lwork, int* info ); void hetrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void hetrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U D U^{H} \texttt{ (if uplo = 'U'), or } A = L D L^{H} \texttt{ (if uplo = 'L'), } \f] // where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices, // and \c D is Hermitian and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting // decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in // the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to // \c 'U' the result is stored in the upper part and the lower part remains untouched. // // \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in // case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or // solving a linear system of equations. 
// // // \n \subsection lapack_llh_decomposition Cholesky Decomposition // // The following functions provide an interface for the LAPACK functions \c spotrf(), \c dpotrf(), // \c cpotrf(), and \c zpotrf(), which compute the Cholesky (LLH) decomposition for the given // positive definite matrix: \code namespace blaze { void potrf( char uplo, int n, float* A, int lda, int* info ); void potrf( char uplo, int n, double* A, int lda, int* info ); void potrf( char uplo, int n, complex<float>* A, int lda, int* info ); void potrf( char uplo, int n, complex<double>* A, int lda, int* info ); template< typename MT, bool SO > void potrf( DenseMatrix<MT,SO>& A, char uplo ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U^{T} U \texttt{ (if uplo = 'U'), or } A = L L^{T} \texttt{ (if uplo = 'L'), } \f] // where \c U is an upper triangular matrix and \c L is a lower triangular matrix. The Cholesky // decomposition fails if the given matrix \a A is not a positive definite matrix. In this case // a \a std::invalid_argument exception is thrown. 
// // // \n \subsection lapack_qr_decomposition QR Decomposition // // The following functions provide an interface for the LAPACK functions \c sgeqrf(), \c dgeqrf(), // \c cgeqrf(), and \c zgeqrf(), which compute the QR decomposition of the given general matrix: \code namespace blaze { void geqrf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void geqrf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void geqrf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void geqrf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void geqrf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = Q \cdot R, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and // <tt>v(i) = 1</tt>. <tt>v(i+1:m)</tt> is stored on exit in <tt>A(i+1:m,i)</tt>, and \c tau // in \c tau(i). Thus on exit the elements on and above the diagonal of the matrix contain the // min(\a m,\a n)-by-\a n upper trapezoidal matrix \c R (\c R is upper triangular if \a m >= \a n); // the elements below the diagonal, with the array \c tau, represent the orthogonal matrix \c Q as // a product of min(\a m,\a n) elementary reflectors. 
// // The following functions provide an interface for the LAPACK functions \c sorgqr(), \c dorgqr(), // \c cungqr(), and \c zungqr(), which reconstruct the \c Q matrix from a QR decomposition: \code namespace blaze { void orgqr( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orgqr( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void ungqr( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void ungqr( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orgqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void ungqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormqr(), \c dormqr(), // \c cunmqr(), and \c zunmqr(), which can be used to multiply a matrix with the \c Q matrix from // a QR decomposition: \code namespace blaze { void ormqr( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormqr( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmqr( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmqr( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormqr( DenseMatrix<MT1,SO1>& C, const 
DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmqr( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_rq_decomposition RQ Decomposition // // The following functions provide an interface for the LAPACK functions \c sgerqf(), \c dgerqf(), // \c cgerqf(), and \c zgerqf(), which compute the RQ decomposition of the given general matrix: \code namespace blaze { void gerqf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void gerqf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void gerqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void gerqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void gerqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = R \cdot Q, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(n-k+i+1:n) = 0</tt> and // <tt>v(n-k+i) = 1</tt>. <tt>v(1:n-k+i-1)</tt> is stored on exit in <tt>A(m-k+i,1:n-k+i-1)</tt>, // and \c tau in \c tau(i). 
Thus in case \a m <= \a n, the upper triangle of the subarray // <tt>A(1:m,n-m+1:n)</tt> contains the \a m-by-\a m upper triangular matrix \c R and in case // \a m >= \a n, the elements on and above the (\a m-\a n)-th subdiagonal contain the \a m-by-\a n // upper trapezoidal matrix \c R; the remaining elements in combination with the array \c tau // represent the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors. // // The following functions provide an interface for the LAPACK functions \c sorgrq(), \c dorgrq(), // \c cungrq(), and \c zungrq(), which reconstruct the \c Q matrix from an RQ decomposition: \code namespace blaze { void orgrq( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orgrq( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void ungrq( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void ungrq( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orgrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void ungrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormrq(), \c dormrq(), // \c cunmrq(), and \c zunmrq(), which can be used to multiply a matrix with the \c Q matrix from // an RQ decomposition: \code namespace blaze { void ormrq( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormrq( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmrq( char side, char trans, int m, int n, 
int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmrq( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormrq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmrq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_ql_decomposition QL Decomposition // // The following functions provide an interface for the LAPACK functions \c sgeqlf(), \c dgeqlf(), // \c cgeqlf(), and \c zgeqlf(), which compute the QL decomposition of the given general matrix: \code namespace blaze { void geqlf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void geqlf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void geqlf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void geqlf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void geqlf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = Q \cdot L, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(m-k+i+1:m) = 0</tt> and // <tt>v(m-k+i) = 1</tt>. 
<tt>v(1:m-k+i-1)</tt> is stored on exit in <tt>A(1:m-k+i-1,n-k+i)</tt>, // and \c tau in \c tau(i). Thus in case \a m >= \a n, the lower triangle of the subarray // A(m-n+1:m,1:n) contains the \a n-by-\a n lower triangular matrix \c L and in case \a m <= \a n, // the elements on and below the (\a n-\a m)-th subdiagonal contain the \a m-by-\a n lower // trapezoidal matrix \c L; the remaining elements in combination with the array \c tau represent // the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors. // // The following functions provide an interface for the LAPACK functions \c sorgql(), \c dorgql(), // \c cungql(), and \c zungql(), which reconstruct the \c Q matrix from a QL decomposition: \code namespace blaze { void orgql( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orgql( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void ungql( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void ungql( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orgql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void ungql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormql(), \c dormql(), // \c cunmql(), and \c zunmql(), which can be used to multiply a matrix with the \c Q matrix from // a QL decomposition: \code namespace blaze { void ormql( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormql( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int 
ldc, double* work, int lwork, int* info ); void unmql( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmql( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormql( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmql( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_lq_decomposition LQ Decomposition // // The following functions provide an interface for the LAPACK functions \c sgelqf(), \c dgelqf(), // \c cgelqf(), and \c zgelqf(), which compute the LQ decomposition of the given general matrix: \code namespace blaze { void gelqf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void gelqf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void gelqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void gelqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void gelqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = L \cdot Q, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(k) . . . 
H(2) H(1) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and // <tt>v(i) = 1</tt>. <tt>v(i+1:n)</tt> is stored on exit in <tt>A(i,i+1:n)</tt>, and \c tau // in \c tau(i). Thus on exit the elements on and below the diagonal of the matrix contain the // \a m-by-min(\a m,\a n) lower trapezoidal matrix \c L (\c L is lower triangular if \a m <= \a n); // the elements above the diagonal, with the array \c tau, represent the orthogonal matrix \c Q // as a product of min(\a m,\a n) elementary reflectors. // // The following functions provide an interface for the LAPACK functions \c sorglq(), \c dorglq(), // \c cunglq(), and \c zunglq(), which reconstruct the \c Q matrix from an LQ decomposition: \code namespace blaze { void orglq( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orglq( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void unglq( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void unglq( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void unglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormlq(), \c dormlq(), // \c cunmlq(), and \c zunmlq(), which can be used to multiply a matrix with the \c Q matrix from // an LQ decomposition: \code namespace blaze { void ormlq( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void 
ormlq( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmlq( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmlq( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormlq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmlq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \section lapack_inversion Matrix Inversion // <hr> // // Given a matrix that has already been decomposed, the following functions can be used to invert // the matrix in-place. // // // \n \subsection lapack_lu_inversion LU-based Inversion // // The following functions provide an interface for the LAPACK functions \c sgetri(), \c dgetri(), // \c cgetri(), and \c zgetri(), which invert a general matrix that has already been decomposed by // an \ref lapack_lu_decomposition : \code namespace blaze { void getri( int n, float* A, int lda, const int* ipiv, float* work, int lwork, int* info ); void getri( int n, double* A, int lda, const int* ipiv, double* work, int lwork, int* info ); void getri( int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int lwork, int* info ); void getri( int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void getri( DenseMatrix<MT,SO>& A, const int* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... 
the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlt_inversion LDLT-based Inversion // // The following functions provide an interface for the LAPACK functions \c ssytri(), \c dsytri(), // \c csytri(), and \c zsytri(), which invert a symmetric indefinite matrix that has already been // decomposed by an \ref lapack_ldlt_decomposition : \code namespace blaze { void sytri( char uplo, int n, float* A, int lda, const int* ipiv, float* work, int* info ); void sytri( char uplo, int n, double* A, int lda, const int* ipiv, double* work, int* info ); void sytri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int* info ); void sytri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int* info ); template< typename MT, bool SO > void sytri( DenseMatrix<MT,SO>& A, char uplo, const int* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_ldlh_inversion LDLH-based Inversion // // The following functions provide an interface for the LAPACK functions \c chetri() and // \c zhetri(), which invert an Hermitian indefinite matrix that has already been decomposed by // an \ref lapack_ldlh_decomposition : \code namespace blaze { void hetri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int* info ); void hetri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int* info ); template< typename MT, bool SO > void hetri( DenseMatrix<MT,SO>& A, char uplo, const int* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first two functions report failure via the \c info argument, the third function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_llh_inversion Cholesky-based Inversion // // The following functions provide an interface for the LAPACK functions \c spotri(), \c dpotri(), // \c cpotri(), and \c zpotri(), which invert a positive definite matrix that has already been // decomposed by an \ref lapack_llh_decomposition : \code namespace blaze { void potri( char uplo, int n, float* A, int lda, int* info ); void potri( char uplo, int n, double* A, int lda, int* info ); void potri( char uplo, int n, complex<float>* A, int lda, int* info ); void potri( char uplo, int n, complex<double>* A, int lda, int* info ); template< typename MT, bool SO > void potri( DenseMatrix<MT,SO>& A, char uplo ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given matrix is singular and not invertible. 
// // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_triangular_inversion Inversion of Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strtri(), \c dtrtri(), // \c ctrtri(), and \c ztrtri(), which invert the given triangular matrix in-place: \code namespace blaze { void trtri( char uplo, char diag, int n, float* A, int lda, int* info ); void trtri( char uplo, char diag, int n, double* A, int lda, int* info ); void trtri( char uplo, char diag, int n, complex<float>* A, int lda, int* info ); void trtri( char uplo, char diag, int n, complex<double>* A, int lda, int* info ); template< typename MT, bool SO > void trtri( DenseMatrix<MT,SO>& A, char uplo, char diag ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a diag argument is neither 'U' nor 'N'; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \section lapack_substitution Substitution // <hr> // // Given a matrix that has already been decomposed the following functions can be used to perform // the forward/backward substitution step to compute the solution to a system of linear equations. 
// Note that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems: // // Single right-hand side: // - \f$ A *x=b \f$ if \a A is column-major // - \f$ A^T*x=b \f$ if \a A is row-major // // Multiple right-hand sides: // - \f$ A *X =B \f$ if both \a A and \a B are column-major // - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major // - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major // - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major // // In this context the general system matrix \a A is a n-by-n matrix that has already been // factorized by the according decomposition function, \a x and \a b are n-dimensional vectors // and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices. // // // \n \subsection lapack_lu_substitution LU-based Substitution // // The following functions provide an interface for the LAPACK functions \c sgetrs(), \c dgetrs(), // \c cgetrs(), and \c zgetrs(), which perform the substitution step for a general matrix that has // already been decomposed by an \ref lapack_lu_decomposition : \code namespace blaze { void getrs( char trans, int n, int nrhs, const float* A, int lda, const int* ipiv, float* B, int ldb, int* info ); void getrs( char trans, int n, int nrhs, const double* A, int lda, const int* ipiv, double* B, int ldb, int* info ); void getrs( char trans, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info ); void getrs( char trans, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void getrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char trans, const int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void getrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char trans, const int* ipiv ); } // namespace 
blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The function fails if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlt_substitution LDLT-based Substitution // // The following functions provide an interface for the LAPACK functions \c ssytrs(), \c dsytrs(), // \c csytrs(), and \c zsytrs(), which perform the substitution step for a symmetric indefinite // matrix that has already been decomposed by an \ref lapack_ldlt_decomposition : \code namespace blaze { void sytrs( char uplo, int n, int nrhs, const float* A, int lda, const int* ipiv, float* B, int ldb, int* info ); void sytrs( char uplo, int n, int nrhs, const double* A, int lda, const int* ipiv, double* B, int ldb, int* info ); void sytrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info ); void sytrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void sytrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void sytrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The function fails if ... // // - ... the given system matrix is not a square matrix; // - ... 
the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlh_substitution LDLH-based Substitution // // The following functions provide an interface for the LAPACK functions \c chetrs(), and \c zhetrs(), // which perform the substitution step for an Hermitian indefinite matrix that has already been // decomposed by an \ref lapack_ldlh_decomposition : \code namespace blaze { void hetrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info ); void hetrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void hetrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void hetrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The function fails if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first two functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_llh_substitution Cholesky-based Substitution // // The following functions provide an interface for the LAPACK functions \c spotrs(), \c dpotrs(), // \c cpotrs(), and \c zpotrs(), which perform the substitution step for a positive definite matrix // that has already been decomposed by an \ref lapack_llh_decomposition : \code namespace blaze { void potrs( char uplo, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info ); void potrs( char uplo, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info ); void potrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info ); void potrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void potrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void potrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The function fails if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_triangular_substitution Substitution for Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strtrs(), \c dtrtrs(), // \c ctrtrs(), and \c ztrtrs(), which perform the substitution step for a triangular matrix: \code namespace blaze { void trtrs( char uplo, char trans, char diag, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info ); void trtrs( char uplo, char trans, char diag, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info ); void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info ); void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void trtrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void trtrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, char trans, char diag ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The function fails if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the given \a diag argument is neither 'U' nor 'N'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. 
// // // \n \section lapack_linear_system_solver Linear System Solver // <hr> // // The following functions represent compound functions that perform both the decomposition step // as well as the substitution step to compute the solution to a system of linear equations. Note // that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems: // // Single right-hand side: // - \f$ A *x=b \f$ if \a A is column-major // - \f$ A^T*x=b \f$ if \a A is row-major // // Multiple right-hand sides: // - \f$ A *X =B \f$ if both \a A and \a B are column-major // - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major // - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major // - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major // // In this context the general system matrix \a A is a n-by-n matrix that has already been // factorized by the according decomposition function, \a x and \a b are n-dimensional vectors // and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices. 
// // // \subsection lapack_lu_linear_system_solver LU-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c sgesv(), \c dgesv(), // \c cgesv(), and \c zgesv(), which combine an \ref lapack_lu_decomposition and the according // \ref lapack_lu_substitution : \code namespace blaze { void gesv( int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, int* info ); void gesv( int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, int* info ); void gesv( int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, int* info ); void gesv( int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void gesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void gesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_lu_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw a // \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_ldlt_linear_system_solver LDLT-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c ssysv(), \c dsysv(), // \c csysv(), and \c zsysv(), which combine an \ref lapack_ldlt_decomposition and the according // \ref lapack_ldlt_substitution : \code namespace blaze { void sysv( char uplo, int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, float* work, int lwork, int* info ); void sysv( char uplo, int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, double* work, int lwork, int* info ); void sysv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info ); void sysv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void sysv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void sysv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_ldlt_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw a // \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_ldlh_linear_system_solver LDLH-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c shesv(), \c dhesv(), // \c chesv(), and \c zhesv(), which combine an \ref lapack_ldlh_decomposition and the according // \ref lapack_ldlh_substitution : \code namespace blaze { void hesv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info ); void hesv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void hesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void hesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_ldlh_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first two functions report failure via the \c info argument, the last two functions throw a // \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_llh_linear_system_solver Cholesky-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c sposv(), \c dposv(), // \c cposv(), and \c zposv(), which combine an \ref lapack_llh_decomposition and the according // \ref lapack_llh_substitution : \code namespace blaze { void posv( char uplo, int n, int nrhs, float* A, int lda, float* B, int ldb, int* info ); void posv( char uplo, int n, int nrhs, double* A, int lda, double* B, int ldb, int* info ); void posv( char uplo, int n, int nrhs, complex<float>* A, int lda, complex<float>* B, int ldb, int* info ); void posv( char uplo, int n, int nrhs, complex<double>* A, int lda, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void posv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void posv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_llh_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw a // \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_triangular_linear_system_solver Linear System Solver for Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strsv(), \c dtrsv(), // \c ctrsv(), and \c ztrsv(): \code namespace blaze { void trsv( char uplo, char trans, char diag, int n, const float* A, int lda, float* x, int incX ); void trsv( char uplo, char trans, char diag, int n, const double* A, int lda, double* x, int incX ); void trsv( char uplo, char trans, char diag, int n, const complex<float>* A, int lda, complex<float>* x, int incX ); void trsv( char uplo, char trans, char diag, int n, const complex<double>* A, int lda, complex<double>* x, int incX ); template< typename MT, bool SO, typename VT, bool TF > void trsv( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the given \a diag argument is neither 'U' nor 'N'. // // The last function throws a \a std::invalid_argument exception in case of an error. Note that // none of the functions does perform any test for singularity or near-singularity. Such tests // must be performed prior to calling this function! // // // \n Previous: \ref blas_functions &nbsp; &nbsp; Next: \ref configuration_files \n */ //************************************************************************************************* //**Configuration Files**************************************************************************** /*!\page configuration_files Configuration Files // // \tableofcontents // // // Sometimes it might necessary to adapt \b Blaze to specific requirements. 
For this purpose // \b Blaze provides several configuration files in the <tt>./blaze/config/</tt> subdirectory, // which provide ample opportunity to customize internal settings, behavior, and thresholds. // This chapter explains the most important of these configuration files. // // // \n \section transpose_flag Default Vector Storage // <hr> // // The \b Blaze default is that all vectors are created as column vectors (if not specified // explicitly): \code blaze::StaticVector<double,3UL> x; // Creates a 3-dimensional static column vector \endcode // The header file <tt>./blaze/config/TransposeFlag.h</tt> allows the configuration of the default // vector storage (i.e. the default transpose flag of the vectors). Via the \c defaultTransposeFlag // value the default transpose flag for all vector of the \b Blaze library can be specified: \code constexpr bool defaultTransposeFlag = columnVector; \endcode // Valid settings for the \c defaultTransposeFlag are blaze::rowVector and blaze::columnVector. // // // \n \section storage_order Default Matrix Storage // <hr> // // Matrices are by default created as row-major matrices: \code blaze::StaticMatrix<double,3UL,3UL> A; // Creates a 3x3 row-major matrix \endcode // The header file <tt>./blaze/config/StorageOrder.h</tt> allows the configuration of the default // matrix storage order. Via the \c defaultStorageOrder value the default storage order for all // matrices of the \b Blaze library can be specified. \code constexpr bool defaultStorageOrder = rowMajor; \endcode // Valid settings for the \c defaultStorageOrder are blaze::rowMajor and blaze::columnMajor. // // // \n \section blas_mode BLAS Mode // <hr> // // In order to achieve maximum performance for multiplications with dense matrices, \b Blaze can // be configured to use a BLAS library. 
Via the following compilation switch in the configuration // file <tt>./blaze/config/BLAS.h</tt> BLAS can be enabled: \code #define BLAZE_BLAS_MODE 1 \endcode // In case the selected BLAS library provides parallel execution, the \c BLAZE_BLAS_IS_PARALLEL // switch should be activated to prevent \b Blaze from parallelizing on its own: \code #define BLAZE_BLAS_IS_PARALLEL 1 \endcode // In case no BLAS library is available, \b Blaze will still work and will not be reduced in // functionality, but performance may be limited. // // // \n \section cache_size Cache Size // <hr> // // The optimization of several \b Blaze compute kernels depends on the cache size of the target // architecture. By default, \b Blaze assumes a cache size of 3 MiByte. However, for optimal // speed the exact cache size of the system should be provided via the \c cacheSize value in the // <tt>./blaze/config/CacheSize.h</tt> configuration file: \code constexpr size_t cacheSize = 3145728UL; \endcode // \n \section vectorization Vectorization // <hr> // // In order to achieve maximum performance and to exploit the compute power of a target platform // the \b Blaze library attempts to vectorize all linear algebra operations by SSE, AVX, and/or // MIC intrinsics, depending on which instruction set is available. However, it is possible to // disable the vectorization entirely by the compile time switch in the configuration file // <tt>./blaze/config/Vectorization.h</tt>: \code #define BLAZE_USE_VECTORIZATION 1 \endcode // In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed // to use intrinsics to speed up computations. In case the switch is set to 0, vectorization is // disabled entirely and the \b Blaze library chooses default, non-vectorized functionality for // the operations. Note that deactivating the vectorization may pose a severe performance // limitation for a large number of operations! 
// // // \n \section thresholds Thresholds // <hr> // // For many computations \b Blaze distinguishes between small and large vectors and matrices. // This separation is especially important for the parallel execution of computations, since // the use of several threads only pays off for sufficiently large vectors and matrices. // Additionally, it also enables \b Blaze to select kernels that are optimized for a specific // size. // // In order to distinguish between small and large data structures \b Blaze provides several // thresholds that can be adapted to the characteristics of the target platform. For instance, // the \c DMATDVECMULT_THRESHOLD specifies the threshold between the application of the custom // \b Blaze kernels for small dense matrix/dense vector multiplications and the BLAS kernels // for large multiplications. All thresholds, including the thresholds for the OpenMP- and // thread-based parallelization, are contained within the configuration file // <tt>./blaze/config/Thresholds.h</tt>. // // // \n \section padding Padding // <hr> // // By default the \b Blaze library uses padding for all dense vectors and matrices in order to // achieve maximum performance in all operations. Due to padding, the proper alignment of data // elements can be guaranteed and the need for remainder loops is minimized. However, on the // downside padding introduces an additional memory overhead, which can be large depending on // the used data type. // // The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch // that can be used to (de-)activate padding: \code constexpr bool usePadding = true; \endcode // If \c usePadding is set to \c true padding is enabled for all dense vectors and matrices, if // it is set to \c false padding is disabled. Note however that disabling padding can considerably // reduce the performance of all dense vector and matrix operations! 
// // // \n \section streaming Streaming (Non-Temporal Stores) // <hr> // // For vectors and matrices that don't fit into the cache anymore non-temporal stores can provide // a significant performance advantage of about 20%. However, this advantage is only in effect in // case the memory bandwidth of the target architecture is maxed out. If the target architecture's // memory bandwidth cannot be exhausted the use of non-temporal stores can decrease performance // instead of increasing it. // // The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch // that can be used to (de-)activate streaming: \code constexpr bool useStreaming = true; \endcode // If \c useStreaming is set to \c true streaming is enabled, if it is set to \c false streaming // is disabled. It is recommended to consult the target architecture's white papers to decide // whether streaming is beneficial or hurtful for performance. // // // \n Previous: \ref lapack_functions &nbsp; &nbsp; Next: \ref block_vectors_and_matrices \n */ //************************************************************************************************* //**Block Vectors and Matrices********************************************************************* /*!\page block_vectors_and_matrices Block Vectors and Matrices // // \tableofcontents // // // \n \section block_vectors_and_matrices_general General Concepts // <hr> // // In addition to fundamental element types, the \b Blaze library supports vectors and matrices // with non-fundamental element type. For instance, it is possible to define block matrices by // using a matrix type as the element type: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix< DynamicMatrix<double,rowMajor>, rowMajor > A; DynamicVector< DynamicVector<double,columnVector >, columnVector > x, y; // ... 
Resizing and initialization y = A * x; \endcode // The matrix/vector multiplication in this example runs fully parallel and uses vectorization // for every inner matrix/vector multiplication and vector addition. // // // \n \section block_vectors_and_matrices_pitfalls Pitfalls // <hr> // // The only thing to keep in mind when using non-fundamental element types is that all operations // between the elements have to be well defined. More specifically, the size of vector and matrix // elements has to match. The attempt to combine two non-matching elements results in either a // compilation error (in case of statically sized elements) or an exception (for dynamically sized // elements): \code DynamicVector< StaticVector<int,2UL> > a; DynamicVector< StaticVector<int,3UL> > b; DynamicVector< DynamicVector<int> > c( a + b ); // Compilation error: element size doesn't match \endcode // Therefore please don't forget that dynamically sized elements (e.g. \c blaze::DynamicVector, // \c blaze::HybridVector, \c blaze::DynamicMatrix, \c blaze::HybridMatrix, ...) need to be sized // accordingly upfront. 
// // // \n \section block_vectors_and_matrices_example Example // <hr> // // The following example demonstrates a complete multiplication between a statically sized block // matrix and block vector: \code // ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) ) // ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) ) // ( ) * ( ) = ( ) // ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) ) // ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) ) typedef StaticMatrix<int,2UL,2UL,rowMajor> M2x2; typedef StaticVector<int,2UL,columnVector> V2; DynamicMatrix<M2x2,rowMajor> A{ { M2x2(1), M2x2(2) }, { M2x2(3), M2x2(4) } }; DynamicVector<V2,columnVector> x{ V2(1), V2(2) }; DynamicVector<V2,columnVector> y( A * x ); \endcode // \n Previous: \ref configuration_files &nbsp; &nbsp; Next: \ref custom_data_types \n */ //************************************************************************************************* //**Custom Data Types****************************************************************************** /*!\page custom_data_types Custom Data Types // // // The \b Blaze library tries hard to make the use of custom data types as convenient, easy and // intuitive as possible. However, unfortunately it is not possible to meet the requirements of // all possible data types. Thus it might be necessary to provide \b Blaze with some additional // information about the data type. The following sections give an overview of the necessary steps // to enable the use of the hypothetical custom data type \c custom::double_t for vector and // matrix operations. For example: \code blaze::DynamicVector<custom::double_t> a, b, c; // ... Resizing and initialization c = a + b; \endcode // The \b Blaze library assumes that the \c custom::double_t data type provides \c operator+() // for additions, \c operator-() for subtractions, \c operator*() for multiplications and // \c operator/() for divisions. If any of these functions is missing it is necessary to implement // the operator to perform the according operation. 
For this example we assume that the custom // data type provides the four following functions instead of operators: \code namespace custom { double_t add ( const double_t& a, const double_t b ); double_t sub ( const double_t& a, const double_t b ); double_t mult( const double_t& a, const double_t b ); double_t div ( const double_t& a, const double_t b ); } // namespace custom \endcode // The following implementations will satisfy the requirements of the \b Blaze library: \code inline custom::double_t operator+( const custom::double_t& a, const custom::double_t& b ) { return add( a, b ); } inline custom::double_t operator-( const custom::double_t& a, const custom::double_t& b ) { return sub( a, b ); } inline custom::double_t operator*( const custom::double_t& a, const custom::double_t& b ) { return mult( a, b ); } inline custom::double_t operator/( const custom::double_t& a, const custom::double_t& b ) { return div( a, b ); } \endcode // \b Blaze will use all the information provided with these functions (for instance the return // type) to properly handle the operations. In the rare case that the return type cannot be // automatically determined from the operator it might be additionally necessary to provide a // specialization of the following four \b Blaze class templates: \code namespace blaze { template<> struct AddTrait<custom::double_t,custom::double_t> { typedef custom::double_t Type; }; template<> struct SubTrait<custom::double_t,custom::double_t> { typedef custom::double_t Type; }; template<> struct MultTrait<custom::double_t,custom::double_t> { typedef custom::double_t Type; }; template<> struct DivTrait<custom::double_t,custom::double_t> { typedef custom::double_t Type; }; } // namespace blaze \endcode // The same steps are necessary if several custom data types need to be combined (as for instance // \c custom::double_t and \c custom::float_t). 
Note that in this case both permutations need to // be taken into account: \code custom::double_t operator+( const custom::double_t& a, const custom::float_t& b ); custom::double_t operator+( const custom::float_t& a, const custom::double_t& b ); // ... \endcode // Please note that only built-in data types apply for vectorization and thus custom data types // cannot achieve maximum performance! // // // \n Previous: \ref block_vectors_and_matrices &nbsp; &nbsp; Next: \ref error_reporting_customization \n */ //************************************************************************************************* //**Customization of the Error Reporting Mechanism************************************************* /*!\page error_reporting_customization Customization of the Error Reporting Mechanism // // \tableofcontents // // // \n \section error_reporting_background Background // <hr> // // The default way of \b Blaze to report errors of any kind is to throw a standard exception. // However, although in general this approach works well, in certain environments and under // special circumstances exceptions may not be the mechanism of choice and a different error // reporting mechanism may be desirable. For this reason, \b Blaze provides several macros, // which enable the customization of the error reporting mechanism. Via these macros it is // possible to replace the standard exceptions by some other exception type or a completely // different approach to report errors. // // // \n \section error_reporting_general_customization Customization of the Reporting Mechanism // <hr> // // In some cases it might be necessary to adapt the entire error reporting mechanism and to // replace it by some other means to signal failure. The primary macro for this purpose is the // \c BLAZE_THROW macro: \code #define BLAZE_THROW( EXCEPTION ) \ throw EXCEPTION \endcode // This macro represents the default mechanism of the \b Blaze library to report errors of any // kind. 
In order to customize the error reporting mechanism all that needs to be done is to // define the macro prior to including any \b Blaze header file. This will cause the \b Blaze // specific mechanism to be overridden. The following example demonstrates this by replacing // exceptions by a call to a \c log() function and a direct call to abort: \code #define BLAZE_THROW( EXCEPTION ) \ log( "..." ); \ abort() #include <blaze/Blaze.h> \endcode // Doing this will trigger a call to \c log() and an abort instead of throwing an exception // whenever an error (such as an invalid argument) is detected. // // \note It is possible to execute several statements instead of executing a single statement to // throw an exception. Also note that it is recommended to define the macro such that a subsequent // semicolon is required! // // \warning This macro is provided with the intention to assist in adapting \b Blaze to special // conditions and environments. However, the customization of the error reporting mechanism via // this macro can have a significant effect on the library. Thus be advised to use the macro // with due care! // // // \n \section error_reporting_exception_customization Customization of the Type of Exceptions // <hr> // // In addition to the customization of the entire error reporting mechanism it is also possible // to customize the type of exceptions being thrown. 
This can be achieved by customizing any // number of the following macros: \code #define BLAZE_THROW_BAD_ALLOC \ BLAZE_THROW( std::bad_alloc() ) #define BLAZE_THROW_LOGIC_ERROR( MESSAGE ) \ BLAZE_THROW( std::logic_error( MESSAGE ) ) #define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \ BLAZE_THROW( std::invalid_argument( MESSAGE ) ) #define BLAZE_THROW_LENGTH_ERROR( MESSAGE ) \ BLAZE_THROW( std::length_error( MESSAGE ) ) #define BLAZE_THROW_OUT_OF_RANGE( MESSAGE ) \ BLAZE_THROW( std::out_of_range( MESSAGE ) ) #define BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \ BLAZE_THROW( std::runtime_error( MESSAGE ) ) \endcode // In order to customize the type of exception the according macro has to be defined prior to // including any \b Blaze header file. This will override the \b Blaze default behavior. The // following example demonstrates this by replacing \c std::invalid_argument by a custom // exception type: \code class InvalidArgument { public: InvalidArgument(); explicit InvalidArgument( const std::string& message ); // ... }; #define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \ BLAZE_THROW( InvalidArgument( MESSAGE ) ) #include <blaze/Blaze.h> \endcode // By manually defining the macro, an \c InvalidArgument exception is thrown instead of a // \c std::invalid_argument exception. Note that it is recommended to define the macro such // that a subsequent semicolon is required! // // \warning These macros are provided with the intention to assist in adapting \b Blaze to // special conditions and environments. However, the customization of the type of an exception // via this macro may have an effect on the library. Thus be advised to use the macro with due // care! // // // \n \section error_reporting_special_errors Customization of Special Errors // <hr> // // Last but not least it is possible to customize the error reporting for special kinds of errors. 
// This can be achieved by customizing any number of the following macros: \code #define BLAZE_THROW_DIVISION_BY_ZERO( MESSAGE ) \ BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) #define BLAZE_THROW_LAPACK_ERROR( MESSAGE ) \ BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \endcode // As explained in the previous sections, in order to customize the handling of special errors // the according macro has to be defined prior to including any \b Blaze header file. This will // override the \b Blaze default behavior. // // // \n Previous: \ref custom_data_types &nbsp; &nbsp; Next: \ref intra_statement_optimization \n */ //************************************************************************************************* //**Intra-Statement Optimization******************************************************************* /*!\page intra_statement_optimization Intra-Statement Optimization // // One of the prime features of the \b Blaze library is the automatic intra-statement optimization. // In order to optimize the overall performance of every single statement \b Blaze attempts to // rearrange the operands based on their types. For instance, the following addition of dense and // sparse vectors \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + s1 + d2; \endcode // is automatically rearranged and evaluated as \code // ... d3 = d1 + d2 + s1; // <- Note that s1 and d2 have been rearranged \endcode // This order of operands is highly favorable for the overall performance since the addition of // the two dense vectors \c d1 and \c d2 can be handled much more efficiently in a vectorized // fashion. // // This intra-statement optimization can have a tremendous effect on the performance of a statement. // Consider for instance the following computation: \code blaze::DynamicMatrix<double> A, B; blaze::DynamicVector<double> x, y; // ... 
Resizing and initialization y = A * B * x; \endcode // Since multiplications are evaluated from left to right, this statement would result in a // matrix/matrix multiplication, followed by a matrix/vector multiplication. However, if the // right subexpression is evaluated first, the performance can be dramatically improved since the // matrix/matrix multiplication can be avoided in favor of a second matrix/vector multiplication. // The \b Blaze library exploits this by automatically restructuring the expression such that the // right multiplication is evaluated first: \code // ... y = A * ( B * x ); \endcode // Note however that although this intra-statement optimization may result in a measurable or // even significant performance improvement, this behavior may be undesirable for several reasons, // for instance because of numerical stability. Therefore, in case the order of evaluation matters, // the best solution is to be explicit and to separate a statement into several statements: \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + s1; // Compute the dense vector/sparse vector addition first ... d3 += d2; // ... and afterwards add the second dense vector \endcode \code // ... blaze::DynamicMatrix<double> A, B, C; blaze::DynamicVector<double> x, y; // ... Resizing and initialization C = A * B; // Compute the left-hand side matrix-matrix multiplication first ... y = C * x; // ... before the right-hand side matrix-vector multiplication \endcode // Alternatively, it is also possible to use the \c eval() function to fix the order of evaluation: \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + eval( s1 + d2 ); \endcode \code blaze::DynamicMatrix<double> A, B; blaze::DynamicVector<double> x, y; // ... 
Resizing and initialization y = eval( A * B ) * x; \endcode // \n Previous: \ref error_reporting_customization */ //************************************************************************************************* #endif
GB_convert_sparse_to_bitmap_template.c
//------------------------------------------------------------------------------ // GB_convert_sparse_to_bitmap_template: convert A from sparse to bitmap //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ { #if defined ( GB_ATYPE ) const GB_ATYPE *restrict Axold = (GB_ATYPE *) A->x ; GB_ATYPE *restrict Axnew = (GB_ATYPE *) Ax_new ; #endif int tid ; #pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1) for (tid = 0 ; tid < A_ntasks ; tid++) { int64_t kfirst = kfirst_Aslice [tid] ; int64_t klast = klast_Aslice [tid] ; for (int64_t k = kfirst ; k <= klast ; k++) { //------------------------------------------------------------------ // find the part of A(:,j) to be operated on by this task //------------------------------------------------------------------ int64_t j = GBH (Ah, k) ; int64_t pA_start, pA_end ; GB_get_pA (&pA_start, &pA_end, tid, k, kfirst, klast, pstart_Aslice, Ap, avlen) ; // the start of A(:,j) in the new bitmap int64_t pA_new = j * avlen ; //------------------------------------------------------------------ // convert A(:,j) from sparse to bitmap //------------------------------------------------------------------ if (nzombies == 0) { for (int64_t p = pA_start ; p < pA_end ; p++) { // A(i,j) has index i, value Axold [p] int64_t i = Ai [p] ; int64_t pnew = i + pA_new ; // move A(i,j) to its new place in the bitmap // Axnew [pnew] = Axold [p] GB_COPY (Axnew, pnew, Axold, p) ; Ab [pnew] = 1 ; } } else { for (int64_t p = pA_start ; p < pA_end ; p++) { // A(i,j) has index i, value Axold [p] int64_t i = Ai [p] ; if (!GB_IS_ZOMBIE (i)) { int64_t pnew = i + pA_new ; // move A(i,j) to its new place in the bitmap // Axnew [pnew] = Axold [p] GB_COPY (Axnew, pnew, Axold, p) ; Ab [pnew] = 1 ; } } } } } done = true ; } 
#undef GB_ATYPE
3d7pt.lbpar.c
#include <omp.h>
#include <math.h>
/* Integer-valued ceiling/floor division helpers used by the PLUTO-generated
 * tiled loop bounds in main() below. */
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 *
 * NOTE(review): y is modified in place by the carry propagation, so callers
 * must not rely on y keeping its original value afterwards.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/* Benchmark driver: runs TESTS timed repetitions of a time-tiled 7-point
 * Jacobi-style stencil on an (Nz x Ny x Nx) double-buffered grid for Nt time
 * steps, reporting the per-run and (via PRINT_RESULTS) best wall-clock time.
 *
 * Command line (from the parsing below): prog Nx Ny Nz Nt
 *
 * NOTE(review): Nx/Ny/Nz stay uninitialized when argc <= 3, and Nt when
 * argc <= 4; they are read regardless, which is undefined behavior -- this
 * generated benchmark evidently assumes all four arguments are supplied.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  if (argc > 3) {
    /* +2 adds a one-cell boundary halo on each side of the requested sizes */
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Two full grids A[0] and A[1] (double buffering for the out-of-place
   * update), built as pointer-of-pointer arrays indexed A[t%2][z][y][x]. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  // NOTE(review): tile_size is only consumed by the source-to-source tool
  // (the 32s match the tile widths hard-coded in the loop nest below) and is
  // never freed at runtime.
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 32;
  tile_size[1] = 32;
  tile_size[2] = 32;
  tile_size[3] = 32;
  tile_size[4] = -1;

  // for timekeeping (ts_return receives timeval_subtract's sign flag but is
  // otherwise unused)
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;  /* center-point weight of the stencil */
  const double beta = 0.0765;   /* shared weight of the six neighbors */

  // initialize variables
  //
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2

    /* NOTE(review): the original source carried a large stray comment block
     * here -- glibc's <stdc-predef.h> license/header boilerplate, evidently
     * inlined by the source-to-source pass.  It documented glibc, not this
     * benchmark, and is condensed to this note. */

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;

    /* Start of CLooG code */
    /* Machine-generated time-tiled loop nest (32x32x32x32 tiles, diamond/
     * skewed in time): t1 iterates over time tiles, t2..t4 over skewed space
     * tiles (t2 parallelized across threads), t5 is the time step, and
     * t6..t8 are the intra-tile z/y/x coordinates shifted by t5. */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,16);t1++) {
        lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
        ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-1,2)),ceild(32*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(16*t1+Ny+29,32)),floord(32*t2+Ny+28,32)),floord(32*t1-32*t2+Nz+Ny+27,32));t3++) {
            for (t4=max(max(max(0,ceild(t1-1,2)),ceild(32*t2-Nz-28,32)),ceild(32*t3-Ny-28,32));t4<=min(min(min(min(floord(Nt+Nx-4,32),floord(16*t1+Nx+29,32)),floord(32*t2+Nx+28,32)),floord(32*t3+Nx+28,32)),floord(32*t1-32*t2+Nz+Nx+27,32));t4++) {
              for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),32*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),32*t3+30),32*t4+30),32*t1-32*t2+Nz+29);t5++) {
                for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
                    lbv=max(32*t4,t5+1);
                    ubv=min(32*t4+31,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      /* 7-point update: new = alpha*center + beta*(sum of the
                       * six face neighbors), written into the other buffer */
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (Causing performance degradation
/*  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
*/
  return 0;
}
GB_unaryop__lnot_bool_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_bool_fp32 // op(A') function: GB_tran__lnot_bool_fp32 // C type: bool // A type: float // cast: bool cij = (bool) aij // unaryop: cij = !aij #define GB_ATYPE \ float #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_BOOL || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_bool_fp32 ( bool *restrict Cx, const float *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_bool_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
remarks_parallel_in_multiple_target_state_machines.c
// RUN: %clang_cc1 -verify=host -Rpass=openmp -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc // RUN: %clang_cc1 -verify=all,safe -Rpass=openmp -fopenmp -O2 -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t.out // RUN: %clang_cc1 -fexperimental-new-pass-manager -verify=all,safe -Rpass=openmp -fopenmp -O2 -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t.out // host-no-diagnostics void bar1(void) { // all-remark {{[OMP100] Potentially unknown OpenMP target region caller}} #pragma omp parallel // #0 // all-remark@#0 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} // safe-remark@#0 {{Parallel region is not known to be called from a unique single target region, maybe the surrounding function has external linkage?; will not attempt to rewrite the state machine use.}} // force-remark@#0 {{[UNSAFE] Parallel region is not known to be called from a unique single target region, maybe the surrounding function has external linkage?; will rewrite the state machine use due to command line flag, this can lead to undefined behavior if the parallel region is called from a target region outside this translation unit.}} // force-remark@#0 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. 
(parallel region ID: __omp_outlined__2_wrapper, kernel ID: <NONE>}} { } } void bar2(void) { // all-remark {{[OMP100] Potentially unknown OpenMP target region caller}} #pragma omp parallel // #1 // all-remark@#1 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} // safe-remark@#1 {{Parallel region is not known to be called from a unique single target region, maybe the surrounding function has external linkage?; will not attempt to rewrite the state machine use.}} // force-remark@#1 {{[UNSAFE] Parallel region is not known to be called from a unique single target region, maybe the surrounding function has external linkage?; will rewrite the state machine use due to command line flag, this can lead to undefined behavior if the parallel region is called from a target region outside this translation unit.}} // force-remark@#1 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__6_wrapper, kernel ID: <NONE>}} { } } void foo1(void) { #pragma omp target teams // #2 // all-remark@#2 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__1_wrapper, kernel ID: __omp_offloading}} // all-remark@#2 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__3_wrapper, kernel ID: __omp_offloading}} { #pragma omp parallel // #3 // all-remark@#3 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. 
This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} // all-remark@#3 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__1_wrapper, kernel ID: __omp_offloading}} { } bar1(); #pragma omp parallel // #4 // all-remark@#4 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} // all-remark@#4 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__3_wrapper, kernel ID: __omp_offloading}} { } } } void foo2(void) { #pragma omp target teams // #5 // all-remark@#5 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__5_wrapper, kernel ID: __omp_offloading}} // all-remark@#5 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__7_wrapper, kernel ID: __omp_offloading}} { #pragma omp parallel // #6 // all-remark@#6 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} // all-remark@#6 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. 
(parallel region ID: __omp_outlined__5_wrapper, kernel ID: __omp_offloading}} { } bar1(); bar2(); #pragma omp parallel // #7 // all-remark@#7 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} // all-remark@#7 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__7_wrapper, kernel ID: __omp_offloading}} { } bar1(); bar2(); } } void foo3(void) { #pragma omp target teams // #8 // all-remark@#8 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__9_wrapper, kernel ID: __omp_offloading}} // all-remark@#8 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__10_wrapper, kernel ID: __omp_offloading}} { #pragma omp parallel // #9 // all-remark@#9 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} // all-remark@#9 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__9_wrapper, kernel ID: __omp_offloading}} { } bar1(); bar2(); #pragma omp parallel // #10 // all-remark@#10 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. 
This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} // all-remark@#10 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__10_wrapper, kernel ID: __omp_offloading}} { } bar1(); bar2(); } } void spmd(void) { // Verify we do not emit the remarks above for "SPMD" regions. #pragma omp target teams #pragma omp parallel { } #pragma omp target teams distribute parallel for for (int i = 0; i < 100; ++i) { } } // all-remark@* 5 {{OpenMP runtime call __kmpc_global_thread_num moved to}} // all-remark@* 12 {{OpenMP runtime call __kmpc_global_thread_num deduplicated}}
mmult_openmp.c
/**
 * @author Zachary M. Mattis
 * ECE 1166
 * OpenMP Matrix Multiplication
 * February 19, 2019
 *
 * This program is a matrix multiply program,
 * capable of multiplying an (M X N) * (N x P) matrix sizes.
 * The matrices of this program are stored in memory
 * in row-major order.
 *
 * This program was successfully compiled on Pitt's CRC cluster using the
 * compilation bash script, mmult.sh. The job was submited by the following
 * command: $ sbatch mmult.slurm
 */

/* Header Files */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>

/* Macros */
#define SIZE_M 10       // rows A
#define SIZE_N 10       // cols A, rows B
#define SIZE_P 10       // cols B

/* Function Declaration */
void printMatrix( double *matrix, int row, int col );

int main (int argc, char *argv[])
{
    // matrix memory (row-major, zero-initialized by calloc)
    double *matrix_a, *matrix_b, *matrix_c;
    matrix_a = calloc( SIZE_M * SIZE_N, sizeof(double) );
    matrix_b = calloc( SIZE_N * SIZE_P, sizeof(double) );
    matrix_c = calloc( SIZE_M * SIZE_P, sizeof(double) );

    // FIX: the original code dereferenced these buffers without checking
    // the allocation results; a failed calloc would be a NULL dereference.
    if ( matrix_a == NULL || matrix_b == NULL || matrix_c == NULL ) {
        fprintf( stderr, "calloc failed\n" );
        free( matrix_a );
        free( matrix_b );
        free( matrix_c );
        return EXIT_FAILURE;
    }

    // variables
    int i, j, k;

    // init: A[i][j] = i+j
    #pragma omp parallel for private(i, j) shared (matrix_a) default (none)
    for (i=0; i<SIZE_M; i++)
        for (j=0; j<SIZE_N; j++)
            matrix_a[i*SIZE_N+j]= i+j;

    // init: B[i][j] = i*j
    #pragma omp parallel for private(i, j) shared (matrix_b) default (none)
    for (i=0; i<SIZE_N; i++)
        for (j=0; j<SIZE_P; j++)
            matrix_b[i*SIZE_P+j]= i*j;

    // timing information
    double time_start, time_end;
    time_start = omp_get_wtime();
    printf("time_start: %f\n", time_start);

    // multiply: C = A * B.
    // FIX: parallelize over rows of C (i) rather than columns (k), and use
    // an i-j-k ordering so the innermost loop walks B's row j and C's row i
    // contiguously in row-major memory (the original k-i-j order strode
    // through B and C with stride SIZE_P on every access).
    #pragma omp parallel for private(i, j, k) shared (matrix_a, matrix_b, matrix_c) default (none)
    for (i=0; i<SIZE_M; i++) {
        for (k=0; k<SIZE_P; k++) {
            matrix_c[i*SIZE_P+k] = 0.0;
        }
        for (j=0; j<SIZE_N; j++) {
            double a_ij = matrix_a[i*SIZE_N+j];   // invariant for the inner k loop
            for (k=0; k<SIZE_P; k++) {
                matrix_c[i*SIZE_P+k] += a_ij * matrix_b[j*SIZE_P+k];
            }
        }
    }

    // timing
    time_end = omp_get_wtime();
    printf("time_end: %f\n", time_end);
    printf( "Execution time (s) for (%d x %d) * (%d x %d) = %f\n\n",
            SIZE_M, SIZE_N, SIZE_N, SIZE_P, time_end-time_start );

    // print results
    //printMatrix( matrix_a, SIZE_M, SIZE_N );
    //printMatrix( matrix_b, SIZE_N, SIZE_P );
    //printMatrix( matrix_c, SIZE_M, SIZE_P);

    // finalize
    free(matrix_a);
    free(matrix_b);
    free(matrix_c);
    return EXIT_SUCCESS;
}

/**
 * Prints a row x col matrix to stdout
 * @param {double *} matrix Ptr to matrix (row-major, row*col doubles)
 * @param {int}      row    Num rows
 * @param {int}      col    Num cols
 */
void printMatrix( double *matrix, int row, int col )
{
    int i, j;
    printf("-------------------------------------------------------\n");
    printf("Result Matrix:\n");
    for (i=0; i<row; i++) {
        printf("\n");
        for (j=0; j<col; j++) {
            printf("%6.2f ", matrix[i*col+j]);
        }
    }
    printf("\n-------------------------------------------------------\n");
    printf ("Done.\n");
}
GB_binop__pow_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__pow_int32 // A.*B function (eWiseMult): GB_AemultB__pow_int32 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__pow_int32 // C+=b function (dense accum): GB_Cdense_accumb__pow_int32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pow_int32 // C=scalar+B GB_bind1st__pow_int32 // C=scalar+B' GB_bind1st_tran__pow_int32 // C=A+scalar GB_bind2nd__pow_int32 // C=A'+scalar GB_bind2nd_tran__pow_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = GB_pow_int32 (aij, bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define 
GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_pow_int32 (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_INT32 || GxB_NO_POW_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__pow_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__pow_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__pow_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool 
A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__pow_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, 
*klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__pow_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__pow_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = GB_pow_int32 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar 
bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__pow_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = GB_pow_int32 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = GB_pow_int32 (x, aij) ; \ } GrB_Info GB_bind1st_tran__pow_int32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = GB_pow_int32 (aij, y) ; \ } GrB_Info GB_bind2nd_tran__pow_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
matflip.c
#include "matrix.h"

/*
 * mat_fliplr: flip matrix A left-to-right (reverse the order of the columns),
 * writing the mirrored data into `result`.
 *
 * A      : source matrix.
 * result : destination matrix, or NULL to have one allocated here with the
 *          same dimensions as A.  NOTE(review): when the caller supplies a
 *          non-NULL result, its dimensions are assumed to match A's — not
 *          checked here; confirm against callers.
 *
 * Returns `result` on success, or mat_error(MAT_MALLOC) if allocation fails.
 */
MATRIX mat_fliplr(MATRIX A, MATRIX result)
{
    int i, j, m, n;
    m = MatCol(A);   /* number of columns */
    n = MatRow(A);   /* number of rows */
    /* allocate the destination lazily if the caller did not supply one */
    if(result==NULL)
        if((result = mat_creat(MatRow(A), MatCol(A), UNDEFINED))==NULL)
            return mat_error(MAT_MALLOC);
    /* rows are independent; j is declared outside the loop so it must be
     * listed as private (i is the parallel loop index, private by default) */
    #pragma omp parallel for private(j)
    for(i=0; i<n; ++i)
    {
        for(j=0; j<m; ++j)
        {
            result[i][j] = A[i][m-j-1]; /* mirror the column index */
        }
    }
    return result;
}

/*
 * mat_flipud: flip matrix A upside-down (reverse the order of the rows),
 * writing the mirrored data into `result`.
 *
 * A      : source matrix.
 * result : destination matrix, or NULL to have one allocated here with the
 *          same dimensions as A (same caveat as mat_fliplr on supplied
 *          dimensions).
 *
 * Returns `result` on success, or mat_error(MAT_MALLOC) if allocation fails.
 */
MATRIX mat_flipud(MATRIX A, MATRIX result)
{
    int i, j, m, n;
    m = MatCol(A);   /* number of columns */
    n = MatRow(A);   /* number of rows */
    /* allocate the destination lazily if the caller did not supply one */
    if(result==NULL)
        if((result = mat_creat(MatRow(A), MatCol(A), UNDEFINED))==NULL)
            return mat_error(MAT_MALLOC);
    #pragma omp parallel for private(j)
    for(i=0; i<n; ++i)
    {
        for(j=0; j<m; ++j)
        {
            result[i][j] = A[n-i-1][j]; /* mirror the row index */
        }
    }
    return result;
}
GB_unaryop__one_uint64_uint64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__one_uint64_uint64 // op(A') function: GB_tran__one_uint64_uint64 // C type: uint64_t // A type: uint64_t // cast: ; // unaryop: cij = 1 #define GB_ATYPE \ uint64_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = 1 ; // casting #define GB_CASTING(z, aij) \ ; ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ONE || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__one_uint64_uint64 ( uint64_t *Cx, // Cx and Ax may be aliased uint64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__one_uint64_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_binop__bshift_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bshift_uint8) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__bshift_uint8) // A.*B function (eWiseMult): GB (_AemultB_03__bshift_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bshift_uint8) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((node)) // C+=B function (dense accum): GB (_Cdense_accumB__bshift_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__bshift_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bshift_uint8) // C=scalar+B GB (_bind1st__bshift_uint8) // C=scalar+B' GB (_bind1st_tran__bshift_uint8) // C=A+scalar GB (_bind2nd__bshift_uint8) // C=A'+scalar GB (_bind2nd_tran__bshift_uint8) // C type: uint8_t // A type: uint8_t // B,b type: int8_t // BinaryOp: cij = GB_bitshift_uint8 (aij, bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 0 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define 
GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_bitshift_uint8 (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BSHIFT || GxB_NO_UINT8 || GxB_NO_BSHIFT_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bshift_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bshift_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bshift_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((node)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bshift_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__bshift_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t 
*restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bshift_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__bshift_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bshift_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bshift_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = Bx [p] ; Cx [p] = GB_bitshift_uint8 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bshift_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = Ax [p] ; Cx [p] = GB_bitshift_uint8 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = GB_bitshift_uint8 (x, aij) ; \ } GrB_Info GB (_bind1st_tran__bshift_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = GB_bitshift_uint8 (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__bshift_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
trmm_x_csr_n_lo_row.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <memory.h>

/*
 * Triangular dense-matrix product for a CSR matrix:
 *     Y = alpha * tril(A) * X + beta * Y
 * Only the lower triangle of A (diagonal included, non-unit) participates;
 * X and Y are row-major dense blocks with `columns` columns and leading
 * dimensions ldx / ldy.  Rows are distributed across threads so that each
 * thread owns roughly the same number of lower-triangular nonzeros.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_CSR *mat,
                           const ALPHA_Number *x,
                           const ALPHA_INT columns,
                           const ALPHA_INT ldx,
                           const ALPHA_Number beta,
                           ALPHA_Number *y,
                           const ALPHA_INT ldy)
{
    ALPHA_INT thread_count = alpha_get_thread_num();

    /* Per-row count of lower-triangular nonzeros (upper triangle is dead
     * weight for this kernel, so plain row counts would balance poorly). */
    ALPHA_INT *lower_nnz = alpha_malloc(sizeof(ALPHA_INT) * mat->rows);
    memset(lower_nnz, 0, mat->rows * sizeof(ALPHA_INT));
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_count)
#endif
    for (ALPHA_INT row = 0; row < mat->rows; row++)
    {
        for (ALPHA_INT idx = mat->rows_start[row]; idx < mat->rows_end[row]; idx++)
        {
            if (mat->col_indx[idx] <= row)
                lower_nnz[row] += 1;
        }
    }

    /* Inclusive prefix sum: lower_nnz[i] becomes the cumulative work up to
     * and including row i, which is what the load balancer expects. */
    for (ALPHA_INT row = 1; row < mat->rows; row++)
        lower_nnz[row] += lower_nnz[row - 1];

    ALPHA_INT *row_bounds = alpha_malloc((thread_count + 1) * sizeof(ALPHA_INT));
    balanced_partition_row_by_nnz(lower_nnz, mat->rows, thread_count, row_bounds);

#ifdef _OPENMP
#pragma omp parallel num_threads(thread_count)
#endif
    {
        ALPHA_INT tid = alpha_get_thread_id();
        ALPHA_INT row_begin = row_bounds[tid];
        ALPHA_INT row_end = row_bounds[tid + 1];
        for (ALPHA_INT r = row_begin; r < row_end; ++r)
        {
            ALPHA_Number *Y = &y[index2(r, 0, ldy)];

            /* Y(r,:) *= beta */
            for (ALPHA_INT c = 0; c < columns; c++)
                alpha_mul(Y[c], Y[c], beta);

            for (ALPHA_INT ai = mat->rows_start[r]; ai < mat->rows_end[r]; ai++)
            {
                ALPHA_INT ac = mat->col_indx[ai];
                if (ac > r)
                    continue; /* strictly-upper entries are ignored */

                ALPHA_Number scaled;
                alpha_mul(scaled, alpha, mat->values[ai]);
                const ALPHA_Number *X = &x[index2(ac, 0, ldx)];

                /* Y(r,:) += scaled * X(ac,:) */
                for (ALPHA_INT c = 0; c < columns; ++c)
                    alpha_madde(Y[c], scaled, X[c]);
            }
        }
    }

    alpha_free(row_bounds);
    alpha_free(lower_nnz);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
seidel_2d-a.pluto.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <math.h> /* * N is the number of points * T is the number of timesteps */ #ifdef HAS_DECLS #include "decls.h" #else #define N 1000L #define T 1000L #endif #define NUM_FP_OPS 10 /* Define our arrays */ double A[2][N][N]; double total=0; double sum_err_sqr=0; int chtotal=0; int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; return x->tv_sec < y->tv_sec; } int main(int argc, char * argv[]) { long int t, i, j, k; const int BASE = 1024; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0; //printf("Number of points = %ld\t|Number of timesteps = %ld\t", N*N, T); /* Initialization */ srand(42); // seed with a constant value to verify results for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { A[0][i][j] = 1.0 * (rand() % BASE); } } #ifdef TIME gettimeofday(&start, 0); #endif // #undef N // #define N 8000L #undef T #define T 500 /* Copyright (C) 1991-2012 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. 
The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* We do support the IEC 559 math functionality, real and complex. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((N >= 1) && (T >= 1)) { for (t1=-1;t1<=floord(T-1,4);t1++) { lbp=ceild(t1,2); ubp=min(floord(2*T+N-2,16),floord(8*t1+N+6,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(0,ceild(t1-63,64));t3<=min(floord(2*T+N-2,512),floord(8*t1+N+14,512));t3++) { if ((t1 <= floord(512*t3-N,8)) && (t2 <= 32*t3-1) && (t3 >= ceild(N,512))) { if (N%2 == 0) { for (t5=max(max(16*t2,512*t3-N+1),-16*t1+16*t2+1024*t3-2*N-13);t5<=min(16*t2+15,-16*t1+16*t2+1024*t3-2*N+2);t5++) { A[0][(-512*t3+t5+N-1)][(N-1)] = 0.111 * ( ( (-512*t3+t5+N-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-512*t3+t5+N-1) - 1][(N-1) - 1]) + A[1][(-512*t3+t5+N-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-512*t3+t5+N-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-512*t3+t5+N-1)][(N-1) - 1]) + A[1][(-512*t3+t5+N-1)][(N-1)] + ((N-1)==N-1 ? 
0 : A[1][(-512*t3+t5+N-1)][(N-1) + 1]) + ( (-512*t3+t5+N-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-512*t3+t5+N-1) + 1][(N-1) - 1]) + A[1][(-512*t3+t5+N-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-512*t3+t5+N-1) + 1][(N-1) + 1]) ) ) );; } } } if ((t1 <= floord(16*t2-N,8)) && (t2 >= ceild(N,16))) { if (N%2 == 0) { for (t6=max(512*t3,16*t2-N+1);t6<=min(16*t2,512*t3+511);t6++) { A[0][(N-1)][(-16*t2+t6+N-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-16*t2+t6+N-1)==0 ? 0 : A[1][(N-1) - 1][(-16*t2+t6+N-1) - 1]) + A[1][(N-1) - 1][(-16*t2+t6+N-1)] + ((-16*t2+t6+N-1)==N-1 ? 0 : A[1][(N-1) - 1][(-16*t2+t6+N-1) + 1]) ) ) + ((-16*t2+t6+N-1)==0 ? 0 : A[1][(N-1)][(-16*t2+t6+N-1) - 1]) + A[1][(N-1)][(-16*t2+t6+N-1)] + ((-16*t2+t6+N-1)==N-1 ? 0 : A[1][(N-1)][(-16*t2+t6+N-1) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-16*t2+t6+N-1)==0 ? 0: A[1][(N-1) + 1][(-16*t2+t6+N-1) - 1]) + A[1][(N-1) + 1][(-16*t2+t6+N-1)] + ((-16*t2+t6+N-1)==N-1 ? 0 : A[1][(N-1) + 1][(-16*t2+t6+N-1) + 1]) ) ) );; } } } if ((N >= 2) && (t1 == 2*t2) && (t1 <= floord(512*t3-N+511,8)) && (t1 >= ceild(512*t3-N+1,8))) { for (t6=max(8*t1,512*t3);t6<=8*t1+N-1;t6++) { if (t1%2 == 0) { A[1][0][(-8*t1+t6)] = 0.111 * ( ( 0==0 ? 0 : ( ((-8*t1+t6)==0 ? 0 : A[0][0 - 1][(-8*t1+t6) - 1]) + A[0][0 - 1][(-8*t1+t6)] + ((-8*t1+t6) == N-1 ? 0 : A[0][0 - 1][(-8*t1+t6) + 1]) ) ) + ((-8*t1+t6)==0 ? 0 : A[0][0][(-8*t1+t6) - 1]) + A[0][0][(-8*t1+t6)] + ((-8*t1+t6)==N-1 ? 0 : A[0][0][(-8*t1+t6) + 1]) + ( 0==N-1 ? 0 : ( ((-8*t1+t6)==0 ? 0: A[0][0 + 1][(-8*t1+t6) - 1]) + A[0][0 + 1][(-8*t1+t6)] + ((-8*t1+t6)==N-1 ? 0 : A[0][0 + 1][(-8*t1+t6) + 1]) ) ) );; } } for (t5=8*t1+1;t5<=8*t1+2;t5++) { for (t6=max(512*t3,8*t1+1);t6<=8*t1+N;t6++) { if (t1%2 == 0) { A[0][(-8*t1+t5-1)][(-8*t1+t6-1)] = 0.111 * ( ( (-8*t1+t5-1)==0 ? 0 : ( ((-8*t1+t6-1)==0 ? 0 : A[1][(-8*t1+t5-1) - 1][(-8*t1+t6-1) - 1]) + A[1][(-8*t1+t5-1) - 1][(-8*t1+t6-1)] + ((-8*t1+t6-1)==N-1 ? 0 : A[1][(-8*t1+t5-1) - 1][(-8*t1+t6-1) + 1]) ) ) + ((-8*t1+t6-1)==0 ? 
0 : A[1][(-8*t1+t5-1)][(-8*t1+t6-1) - 1]) + A[1][(-8*t1+t5-1)][(-8*t1+t6-1)] + ((-8*t1+t6-1)==N-1 ? 0 : A[1][(-8*t1+t5-1)][(-8*t1+t6-1) + 1]) + ( (-8*t1+t5-1)==N-1 ? 0 : ( ((-8*t1+t6-1)==0 ? 0: A[1][(-8*t1+t5-1) + 1][(-8*t1+t6-1) - 1]) + A[1][(-8*t1+t5-1) + 1][(-8*t1+t6-1)] + ((-8*t1+t6-1)==N-1 ? 0 : A[1][(-8*t1+t5-1) + 1][(-8*t1+t6-1) + 1]) ) ) );; } } } } if ((t1 == 2*t2) && (t1 >= ceild(512*t3-N+512,8))) { for (t6=max(8*t1,512*t3);t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][0][(-8*t1+t6)] = 0.111 * ( ( 0==0 ? 0 : ( ((-8*t1+t6)==0 ? 0 : A[0][0 - 1][(-8*t1+t6) - 1]) + A[0][0 - 1][(-8*t1+t6)] + ((-8*t1+t6) == N-1 ? 0 : A[0][0 - 1][(-8*t1+t6) + 1]) ) ) + ((-8*t1+t6)==0 ? 0 : A[0][0][(-8*t1+t6) - 1]) + A[0][0][(-8*t1+t6)] + ((-8*t1+t6)==N-1 ? 0 : A[0][0][(-8*t1+t6) + 1]) + ( 0==N-1 ? 0 : ( ((-8*t1+t6)==0 ? 0: A[0][0 + 1][(-8*t1+t6) - 1]) + A[0][0 + 1][(-8*t1+t6)] + ((-8*t1+t6)==N-1 ? 0 : A[0][0 + 1][(-8*t1+t6) + 1]) ) ) );; } } for (t5=8*t1+1;t5<=8*t1+2;t5++) { for (t6=max(512*t3,8*t1+1);t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[0][(-8*t1+t5-1)][(-8*t1+t6-1)] = 0.111 * ( ( (-8*t1+t5-1)==0 ? 0 : ( ((-8*t1+t6-1)==0 ? 0 : A[1][(-8*t1+t5-1) - 1][(-8*t1+t6-1) - 1]) + A[1][(-8*t1+t5-1) - 1][(-8*t1+t6-1)] + ((-8*t1+t6-1)==N-1 ? 0 : A[1][(-8*t1+t5-1) - 1][(-8*t1+t6-1) + 1]) ) ) + ((-8*t1+t6-1)==0 ? 0 : A[1][(-8*t1+t5-1)][(-8*t1+t6-1) - 1]) + A[1][(-8*t1+t5-1)][(-8*t1+t6-1)] + ((-8*t1+t6-1)==N-1 ? 0 : A[1][(-8*t1+t5-1)][(-8*t1+t6-1) + 1]) + ( (-8*t1+t5-1)==N-1 ? 0 : ( ((-8*t1+t6-1)==0 ? 0: A[1][(-8*t1+t5-1) + 1][(-8*t1+t6-1) - 1]) + A[1][(-8*t1+t5-1) + 1][(-8*t1+t6-1)] + ((-8*t1+t6-1)==N-1 ? 0 : A[1][(-8*t1+t5-1) + 1][(-8*t1+t6-1) + 1]) ) ) );; } } } } if ((N == 1) && (t1 == 2*t2)) { if (t1%2 == 0) { A[1][0][0] = 0.111 * ( ( 0==0 ? 0 : ( (0==0 ? 0 : A[0][0 - 1][0 - 1]) + A[0][0 - 1][0] + (0 == N-1 ? 0 : A[0][0 - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][0][0 - 1]) + A[0][0][0] + (0==N-1 ? 0 : A[0][0][0 + 1]) + ( 0==N-1 ? 0 : ( (0==0 ? 
0: A[0][0 + 1][0 - 1]) + A[0][0 + 1][0] + (0==N-1 ? 0 : A[0][0 + 1][0 + 1]) ) ) );; } if (t1%2 == 0) { A[0][0][0] = 0.111 * ( ( 0==0 ? 0 : ( (0==0 ? 0 : A[1][0 - 1][0 - 1]) + A[1][0 - 1][0] + (0==N-1 ? 0 : A[1][0 - 1][0 + 1]) ) ) + (0==0 ? 0 : A[1][0][0 - 1]) + A[1][0][0] + (0==N-1 ? 0 : A[1][0][0 + 1]) + ( 0==N-1 ? 0 : ( (0==0 ? 0: A[1][0 + 1][0 - 1]) + A[1][0 + 1][0] + (0==N-1 ? 0 : A[1][0 + 1][0 + 1]) ) ) );; } } for (t4=max(max(max(0,ceild(512*t3-N+1,2)),4*t1),8*t1-8*t2+8);t4<=min(min(min(min(floord(512*t3-N+511,2),floord(16*t1-16*t2+N-1,2)),T-1),4*t1+3),256*t3-1);t4++) { for (t5=16*t2;t5<=-16*t1+16*t2+4*t4;t5++) { for (t6=512*t3;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 
0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } for (t5=-16*t1+16*t2+4*t4+1;t5<=min(2*t4+N,-16*t1+16*t2+4*t4+2);t5++) { for (t6=512*t3;t6<=2*t4+N;t6++) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } if (t1 == 2*t2-1) { for (t4=max(max(0,ceild(512*t3-N+1,2)),4*t1);t4<=min(min(min(min(floord(8*t1+N-9,2),floord(512*t3-N+511,2)),T-1),4*t1+3),256*t3-1);t4++) { for (t5=8*t1+8;t5<=-8*t1+4*t4+8;t5++) { for (t6=512*t3;t6<=2*t4+N-1;t6++) { if ((t1+1)%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 
0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } } for (t5=-8*t1+4*t4+9;t5<=min(2*t4+N,-8*t1+4*t4+10);t5++) { for (t6=512*t3;t6<=2*t4+N;t6++) { if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 
0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } } for (t4=max(max(max(0,ceild(512*t3-N+512,2)),4*t1),8*t1-8*t2+8);t4<=min(min(min(floord(16*t1-16*t2+N-1,2),T-1),4*t1+3),256*t3-1);t4++) { for (t5=16*t2;t5<=-16*t1+16*t2+4*t4;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } for (t5=-16*t1+16*t2+4*t4+1;t5<=min(2*t4+N,-16*t1+16*t2+4*t4+2);t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 
0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } if (t1 == 2*t2-1) { for (t4=max(max(0,ceild(512*t3-N+512,2)),4*t1);t4<=min(min(T-1,4*t1+3),256*t3-1);t4++) { for (t5=8*t1+8;t5<=-8*t1+4*t4+8;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { if ((t1+1)%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } for (t5=-8*t1+4*t4+9;t5<=-8*t1+4*t4+10;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 
0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } } if (t1 == 2*t2) { for (t4=max(ceild(512*t3-N+1,2),4*t1+1);t4<=min(min(min(floord(512*t3-N+511,2),T-1),4*t1+3),256*t3-1);t4++) { for (t6=512*t3;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.111 * ( ( 0==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][0 - 1][(-2*t4+t6) - 1]) + A[0][0 - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][0 - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + A[0][0][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0][(-2*t4+t6) + 1]) + ( 0==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][0 + 1][(-2*t4+t6) - 1]) + A[0][0 + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0 + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=2*t4+1;t5<=-8*t1+4*t4;t5++) { for (t6=512*t3;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 
0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } } for (t5=-8*t1+4*t4+1;t5<=-8*t1+4*t4+2;t5++) { for (t6=512*t3;t6<=2*t4+N;t6++) { if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } } if (t1 == 2*t2) { for (t4=max(ceild(512*t3-N+1,2),4*t1+4);t4<=min(min(min(floord(512*t3-N+511,2),T-1),4*t1+7),256*t3-1);t4++) { for (t6=512*t3;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.111 * ( ( 0==0 ? 0 : ( ((-2*t4+t6)==0 ? 
0 : A[0][0 - 1][(-2*t4+t6) - 1]) + A[0][0 - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][0 - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + A[0][0][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0][(-2*t4+t6) + 1]) + ( 0==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][0 + 1][(-2*t4+t6) - 1]) + A[0][0 + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0 + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=2*t4+1;t5<=8*t1+15;t5++) { for (t6=512*t3;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 
0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } } } } if (t1 == 2*t2) { for (t4=max(ceild(512*t3-N+512,2),4*t1+1);t4<=min(min(T-1,4*t1+3),256*t3-1);t4++) { for (t6=512*t3;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.111 * ( ( 0==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][0 - 1][(-2*t4+t6) - 1]) + A[0][0 - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][0 - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + A[0][0][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0][(-2*t4+t6) + 1]) + ( 0==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][0 + 1][(-2*t4+t6) - 1]) + A[0][0 + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0 + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=2*t4+1;t5<=-8*t1+4*t4;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 
0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } for (t5=-8*t1+4*t4+1;t5<=-8*t1+4*t4+2;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } } if (t1 == 2*t2) { for (t4=max(ceild(512*t3-N+512,2),4*t1+4);t4<=min(min(T-1,4*t1+7),256*t3-1);t4++) { for (t6=512*t3;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.111 * ( ( 0==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][0 - 1][(-2*t4+t6) - 1]) + A[0][0 - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][0 - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + A[0][0][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0][(-2*t4+t6) + 1]) + ( 0==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][0 + 1][(-2*t4+t6) - 1]) + A[0][0 + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0 + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=2*t4+1;t5<=8*t1+15;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 
0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } } if ((N == 1) && (t1 == 2*t2)) { for (t4=4*t1+1;t4<=min(T-1,4*t1+7);t4++) { if (t1%2 == 0) { A[1][0][0] = 0.111 * ( ( 0==0 ? 0 : ( (0==0 ? 0 : A[0][0 - 1][0 - 1]) + A[0][0 - 1][0] + (0 == N-1 ? 0 : A[0][0 - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][0][0 - 1]) + A[0][0][0] + (0==N-1 ? 0 : A[0][0][0 + 1]) + ( 0==N-1 ? 0 : ( (0==0 ? 0: A[0][0 + 1][0 - 1]) + A[0][0 + 1][0] + (0==N-1 ? 0 : A[0][0 + 1][0 + 1]) ) ) );; } if (t1%2 == 0) { A[0][0][0] = 0.111 * ( ( 0==0 ? 0 : ( (0==0 ? 0 : A[1][0 - 1][0 - 1]) + A[1][0 - 1][0] + (0==N-1 ? 0 : A[1][0 - 1][0 + 1]) ) ) + (0==0 ? 0 : A[1][0][0 - 1]) + A[1][0][0] + (0==N-1 ? 0 : A[1][0][0 + 1]) + ( 0==N-1 ? 0 : ( (0==0 ? 0: A[1][0 + 1][0 - 1]) + A[1][0 + 1][0] + (0==N-1 ? 0 : A[1][0 + 1][0 + 1]) ) ) );; } } } for (t4=max(max(4*t1,256*t3),8*t1-8*t2+8);t4<=min(min(min(floord(512*t3-N+511,2),floord(16*t1-16*t2+N-1,2)),T-1),4*t1+3);t4++) { for (t5=16*t2;t5<=-16*t1+16*t2+4*t4;t5++) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 
0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 
0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } for (t5=-16*t1+16*t2+4*t4+1;t5<=min(2*t4+N,-16*t1+16*t2+4*t4+2);t5++) { for (t6=2*t4+1;t6<=2*t4+N;t6++) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } if (t1 == 2*t2-1) { for (t4=max(4*t1,256*t3);t4<=min(min(min(floord(8*t1+N-9,2),floord(512*t3-N+511,2)),T-1),4*t1+3);t4++) { for (t5=8*t1+8;t5<=-8*t1+4*t4+8;t5++) { if ((t1+1)%2 == 0) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; } for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { if ((t1+1)%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 
0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } } for (t5=-8*t1+4*t4+9;t5<=min(2*t4+N,-8*t1+4*t4+10);t5++) { for (t6=2*t4+1;t6<=2*t4+N;t6++) { if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 
0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } } for (t4=max(max(max(ceild(512*t3-N+512,2),4*t1),256*t3),8*t1-8*t2+8);t4<=min(min(floord(16*t1-16*t2+N-1,2),T-1),4*t1+3);t4++) { for (t5=16*t2;t5<=-16*t1+16*t2+4*t4;t5++) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; for (t6=2*t4+1;t6<=512*t3+511;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 
0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } for (t5=-16*t1+16*t2+4*t4+1;t5<=min(2*t4+N,-16*t1+16*t2+4*t4+2);t5++) { for (t6=2*t4+1;t6<=512*t3+511;t6++) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } if (t1 == 2*t2-1) { for (t4=max(max(ceild(512*t3-N+512,2),4*t1),256*t3);t4<=min(min(floord(8*t1+N-9,2),T-1),4*t1+3);t4++) { for (t5=8*t1+8;t5<=-8*t1+4*t4+8;t5++) { if ((t1+1)%2 == 0) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; } for (t6=2*t4+1;t6<=512*t3+511;t6++) { if ((t1+1)%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 
0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } for (t5=-8*t1+4*t4+9;t5<=min(2*t4+N,-8*t1+4*t4+10);t5++) { for (t6=2*t4+1;t6<=512*t3+511;t6++) { if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } } for (t4=max(max(max(max(0,ceild(16*t1-16*t2+N,2)),ceild(16*t2-N+1,2)),ceild(512*t3-N+1,2)),8*t1-8*t2+8);t4<=min(min(min(floord(512*t3-N+511,2),T-1),4*t1+3),256*t3-1);t4++) { for (t5=16*t2;t5<=2*t4+N-1;t5++) { for (t6=512*t3;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 
0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } for (t6=512*t3;t6<=2*t4+N;t6++) { A[0][(N-1)][(-2*t4+t6-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1)][(-2*t4+t6-1) - 1]) + A[1][(N-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1)][(-2*t4+t6-1) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(N-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 
0 : A[1][(N-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } if ((t1 == 2*t2-1) && (t1 == 64*t3-1)) { for (t4=max(max(0,ceild(8*t1-N+9,2)),ceild(8*t1+N-8,2));t4<=min(T-1,4*t1+3);t4++) { for (t5=8*t1+8;t5<=2*t4+N-1;t5++) { for (t6=8*t1+8;t6<=2*t4+N-1;t6++) { if ((t1+1)%2 == 0) { if ((t1+1)%64 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } } if ((t1+1)%2 == 0) { if ((t1+1)%64 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } if ((t1+1)%2 == 0) { if ((t1+1)%64 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 
0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } } } for (t6=8*t1+8;t6<=2*t4+N;t6++) { if ((t1+1)%2 == 0) { if ((t1+1)%64 == 0) { A[0][(N-1)][(-2*t4+t6-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1)][(-2*t4+t6-1) - 1]) + A[1][(N-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1)][(-2*t4+t6-1) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(N-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } } for (t4=max(max(max(0,ceild(16*t1-16*t2+N,2)),ceild(16*t2-N+1,2)),ceild(512*t3-N+512,2));t4<=min(min(T-1,4*t1+3),256*t3-1);t4++) { for (t5=16*t2;t5<=2*t4+N-1;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 
0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } for (t6=512*t3;t6<=512*t3+511;t6++) { A[0][(N-1)][(-2*t4+t6-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1)][(-2*t4+t6-1) - 1]) + A[1][(N-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1)][(-2*t4+t6-1) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(N-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } for (t4=max(ceild(512*t3-N+1,2),4*t1+4);t4<=min(min(min(min(floord(16*t2-N+15,2),floord(512*t3-N+511,2)),floord(16*t1-16*t2+N+12,2)),T-1),256*t3-1);t4++) { for (t5=-16*t1+16*t2+4*t4-15;t5<=-16*t1+16*t2+4*t4-14;t5++) { for (t6=512*t3;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=-16*t1+16*t2+4*t4-13;t5<=2*t4+N-1;t5++) { for (t6=512*t3;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 
0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } for (t6=512*t3;t6<=2*t4+N;t6++) { A[0][(N-1)][(-2*t4+t6-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1)][(-2*t4+t6-1) - 1]) + A[1][(N-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1)][(-2*t4+t6-1) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(N-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 
0 : A[1][(N-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } for (t4=max(max(max(ceild(16*t2-N+16,2),ceild(512*t3-N+1,2)),4*t1+4),8*t1-8*t2+8);t4<=min(min(min(floord(512*t3-N+511,2),T-1),4*t1+7),256*t3-1);t4++) { for (t5=-16*t1+16*t2+4*t4-15;t5<=-16*t1+16*t2+4*t4-14;t5++) { for (t6=512*t3;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=-16*t1+16*t2+4*t4-13;t5<=16*t2+15;t5++) { for (t6=512*t3;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 
0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } } if (t1 <= min(min(min(floord(16*t2-N+1,8),floord(8*t2+256*t3-N+249,8)),floord(16*t2+2*T-N-15,16)),floord(16*t2+512*t3-N-15,16))) { if ((N+1)%2 == 0) { for (t5=16*t1-16*t2+2*N+11;t5<=16*t1-16*t2+2*N+12;t5++) { for (t6=512*t3;t6<=16*t1-16*t2+2*N+12;t6++) { A[1][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13)] = 0.111 * ( ( (-16*t1+16*t2+t5-N-13)==0 ? 0 : ( ((-16*t1+16*t2+t6-N-13)==0 ? 0 : A[0][(-16*t1+16*t2+t5-N-13) - 1][(-16*t1+16*t2+t6-N-13) - 1]) + A[0][(-16*t1+16*t2+t5-N-13) - 1][(-16*t1+16*t2+t6-N-13)] + ((-16*t1+16*t2+t6-N-13) == N-1 ? 0 : A[0][(-16*t1+16*t2+t5-N-13) - 1][(-16*t1+16*t2+t6-N-13) + 1]) ) ) + ((-16*t1+16*t2+t6-N-13)==0 ? 0 : A[0][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13) - 1]) + A[0][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13)] + ((-16*t1+16*t2+t6-N-13)==N-1 ? 0 : A[0][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13) + 1]) + ( (-16*t1+16*t2+t5-N-13)==N-1 ? 0 : ( ((-16*t1+16*t2+t6-N-13)==0 ? 0: A[0][(-16*t1+16*t2+t5-N-13) + 1][(-16*t1+16*t2+t6-N-13) - 1]) + A[0][(-16*t1+16*t2+t5-N-13) + 1][(-16*t1+16*t2+t6-N-13)] + ((-16*t1+16*t2+t6-N-13)==N-1 ? 0 : A[0][(-16*t1+16*t2+t5-N-13) + 1][(-16*t1+16*t2+t6-N-13) + 1]) ) ) );; } } for (t6=512*t3;t6<=16*t1-16*t2+2*N+13;t6++) { A[0][(N-1)][(-16*t1+16*t2+t6-N-14)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-16*t1+16*t2+t6-N-14)==0 ? 
0 : A[1][(N-1) - 1][(-16*t1+16*t2+t6-N-14) - 1]) + A[1][(N-1) - 1][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[1][(N-1) - 1][(-16*t1+16*t2+t6-N-14) + 1]) ) ) + ((-16*t1+16*t2+t6-N-14)==0 ? 0 : A[1][(N-1)][(-16*t1+16*t2+t6-N-14) - 1]) + A[1][(N-1)][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[1][(N-1)][(-16*t1+16*t2+t6-N-14) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-16*t1+16*t2+t6-N-14)==0 ? 0: A[1][(N-1) + 1][(-16*t1+16*t2+t6-N-14) - 1]) + A[1][(N-1) + 1][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[1][(N-1) + 1][(-16*t1+16*t2+t6-N-14) + 1]) ) ) );; } } } for (t4=max(ceild(512*t3-N+512,2),4*t1+4);t4<=min(min(min(floord(16*t2-N+15,2),floord(16*t1-16*t2+N+12,2)),T-1),256*t3-1);t4++) { for (t5=-16*t1+16*t2+4*t4-15;t5<=-16*t1+16*t2+4*t4-14;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=-16*t1+16*t2+4*t4-13;t5<=2*t4+N-1;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 
0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } for (t6=512*t3;t6<=512*t3+511;t6++) { A[0][(N-1)][(-2*t4+t6-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1)][(-2*t4+t6-1) - 1]) + A[1][(N-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1)][(-2*t4+t6-1) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(N-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } for (t4=max(max(max(ceild(16*t2-N+16,2),ceild(512*t3-N+512,2)),4*t1+4),8*t1-8*t2+8);t4<=min(min(T-1,4*t1+7),256*t3-1);t4++) { for (t5=-16*t1+16*t2+4*t4-15;t5<=-16*t1+16*t2+4*t4-14;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 
0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=-16*t1+16*t2+4*t4-13;t5<=16*t2+15;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } if ((t1 <= min(min(floord(16*t2-N+1,8),floord(16*t2+2*T-N-15,16)),floord(16*t2+512*t3-N-15,16))) && (t1 >= ceild(8*t2+256*t3-N+251,8))) { if ((N+1)%2 == 0) { for (t5=16*t1-16*t2+2*N+11;t5<=16*t1-16*t2+2*N+12;t5++) { for (t6=512*t3;t6<=512*t3+511;t6++) { A[1][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13)] = 0.111 * ( ( (-16*t1+16*t2+t5-N-13)==0 ? 0 : ( ((-16*t1+16*t2+t6-N-13)==0 ? 0 : A[0][(-16*t1+16*t2+t5-N-13) - 1][(-16*t1+16*t2+t6-N-13) - 1]) + A[0][(-16*t1+16*t2+t5-N-13) - 1][(-16*t1+16*t2+t6-N-13)] + ((-16*t1+16*t2+t6-N-13) == N-1 ? 
0 : A[0][(-16*t1+16*t2+t5-N-13) - 1][(-16*t1+16*t2+t6-N-13) + 1]) ) ) + ((-16*t1+16*t2+t6-N-13)==0 ? 0 : A[0][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13) - 1]) + A[0][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13)] + ((-16*t1+16*t2+t6-N-13)==N-1 ? 0 : A[0][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13) + 1]) + ( (-16*t1+16*t2+t5-N-13)==N-1 ? 0 : ( ((-16*t1+16*t2+t6-N-13)==0 ? 0: A[0][(-16*t1+16*t2+t5-N-13) + 1][(-16*t1+16*t2+t6-N-13) - 1]) + A[0][(-16*t1+16*t2+t5-N-13) + 1][(-16*t1+16*t2+t6-N-13)] + ((-16*t1+16*t2+t6-N-13)==N-1 ? 0 : A[0][(-16*t1+16*t2+t5-N-13) + 1][(-16*t1+16*t2+t6-N-13) + 1]) ) ) );; } } for (t6=512*t3;t6<=512*t3+511;t6++) { A[0][(N-1)][(-16*t1+16*t2+t6-N-14)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-16*t1+16*t2+t6-N-14)==0 ? 0 : A[1][(N-1) - 1][(-16*t1+16*t2+t6-N-14) - 1]) + A[1][(N-1) - 1][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[1][(N-1) - 1][(-16*t1+16*t2+t6-N-14) + 1]) ) ) + ((-16*t1+16*t2+t6-N-14)==0 ? 0 : A[1][(N-1)][(-16*t1+16*t2+t6-N-14) - 1]) + A[1][(N-1)][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[1][(N-1)][(-16*t1+16*t2+t6-N-14) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-16*t1+16*t2+t6-N-14)==0 ? 0: A[1][(N-1) + 1][(-16*t1+16*t2+t6-N-14) - 1]) + A[1][(N-1) + 1][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[1][(N-1) + 1][(-16*t1+16*t2+t6-N-14) + 1]) ) ) );; } } } if (t1 == 2*t2) { for (t4=max(256*t3,4*t1+1);t4<=min(min(min(floord(8*t1+N-1,2),floord(512*t3-N+511,2)),T-1),4*t1+3);t4++) { for (t6=2*t4;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.111 * ( ( 0==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][0 - 1][(-2*t4+t6) - 1]) + A[0][0 - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][0 - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + A[0][0][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0][(-2*t4+t6) + 1]) + ( 0==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][0 + 1][(-2*t4+t6) - 1]) + A[0][0 + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 
0 : A[0][0 + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=2*t4+1;t5<=-8*t1+4*t4;t5++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; } for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 
0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } } for (t5=-8*t1+4*t4+1;t5<=min(2*t4+N,-8*t1+4*t4+2);t5++) { for (t6=2*t4+1;t6<=2*t4+N;t6++) { if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } } if (t1 == 2*t2) { for (t4=max(256*t3,4*t1+4);t4<=min(min(min(floord(8*t1+N-1,2),floord(512*t3-N+511,2)),T-1),4*t1+7);t4++) { for (t6=2*t4;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.111 * ( ( 0==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][0 - 1][(-2*t4+t6) - 1]) + A[0][0 - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][0 - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + A[0][0][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0][(-2*t4+t6) + 1]) + ( 0==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][0 + 1][(-2*t4+t6) - 1]) + A[0][0 + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0 + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=2*t4+1;t5<=8*t1+15;t5++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 
0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; } for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 
0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } } } } if (t1 == 2*t2) { for (t4=max(max(ceild(512*t3-N+512,2),256*t3),4*t1+1);t4<=min(T-1,4*t1+3);t4++) { for (t6=2*t4;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.111 * ( ( 0==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][0 - 1][(-2*t4+t6) - 1]) + A[0][0 - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][0 - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + A[0][0][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0][(-2*t4+t6) + 1]) + ( 0==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][0 + 1][(-2*t4+t6) - 1]) + A[0][0 + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0 + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=2*t4+1;t5<=-8*t1+4*t4;t5++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; } for (t6=2*t4+1;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 
0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } for (t5=-8*t1+4*t4+1;t5<=-8*t1+4*t4+2;t5++) { for (t6=2*t4+1;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } } for (t4=max(max(max(ceild(16*t1-16*t2+N,2),ceild(16*t2-N+1,2)),256*t3),8*t1-8*t2+8);t4<=min(min(floord(512*t3-N+511,2),T-1),4*t1+3);t4++) { for (t5=16*t2;t5<=2*t4+N-1;t5++) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 
0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } for (t6=2*t4+1;t6<=2*t4+N;t6++) { A[0][(N-1)][(-2*t4+t6-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1)][(-2*t4+t6-1) - 1]) + A[1][(N-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1)][(-2*t4+t6-1) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 
0: A[1][(N-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } if (t1 == 2*t2-1) { for (t4=max(max(ceild(8*t1-N+9,2),ceild(8*t1+N-8,2)),256*t3);t4<=min(min(floord(512*t3-N+511,2),T-1),4*t1+3);t4++) { for (t5=8*t1+8;t5<=2*t4+N-1;t5++) { if ((t1+1)%2 == 0) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; } for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { if ((t1+1)%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 
0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } } for (t6=2*t4+1;t6<=2*t4+N;t6++) { if ((t1+1)%2 == 0) { A[0][(N-1)][(-2*t4+t6-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1)][(-2*t4+t6-1) - 1]) + A[1][(N-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1)][(-2*t4+t6-1) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(N-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } for (t4=max(max(max(max(ceild(16*t1-16*t2+N,2),ceild(16*t2-N+1,2)),ceild(512*t3-N+512,2)),256*t3),8*t1-8*t2+8);t4<=min(T-1,4*t1+3);t4++) { for (t5=16*t2;t5<=2*t4+N-1;t5++) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; for (t6=2*t4+1;t6<=512*t3+511;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 
0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } for (t6=2*t4+1;t6<=512*t3+511;t6++) { A[0][(N-1)][(-2*t4+t6-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1)][(-2*t4+t6-1) - 1]) + A[1][(N-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1)][(-2*t4+t6-1) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(N-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } if ((t1 == 2*t2-1) && (t1 == 64*t3+63)) { for (t4=max(ceild(8*t1-N+9,2),ceild(8*t1+N-8,2));t4<=min(T-1,4*t1+3);t4++) { for (t5=8*t1+8;t5<=2*t4+N-1;t5++) { if ((t1+1)%2 == 0) { if ((t1+1)%64 == 0) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 
0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; } } for (t6=2*t4+1;t6<=8*t1+7;t6++) { if ((t1+1)%2 == 0) { if ((t1+1)%64 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } } if ((t1+1)%2 == 0) { if ((t1+1)%64 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } for (t6=2*t4+1;t6<=8*t1+7;t6++) { if ((t1+1)%2 == 0) { if ((t1+1)%64 == 0) { A[0][(N-1)][(-2*t4+t6-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 
0 : A[1][(N-1)][(-2*t4+t6-1) - 1]) + A[1][(N-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1)][(-2*t4+t6-1) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(N-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } } for (t4=max(max(256*t3,4*t1+4),8*t1-8*t2+8);t4<=min(min(min(floord(16*t2-N+15,2),floord(512*t3-N+511,2)),floord(16*t1-16*t2+N+12,2)),T-1);t4++) { for (t5=-16*t1+16*t2+4*t4-15;t5<=-16*t1+16*t2+4*t4-14;t5++) { for (t6=2*t4;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=-16*t1+16*t2+4*t4-13;t5<=2*t4+N-1;t5++) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 
0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } for (t6=2*t4+1;t6<=2*t4+N;t6++) { A[0][(N-1)][(-2*t4+t6-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1)][(-2*t4+t6-1) - 1]) + A[1][(N-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1)][(-2*t4+t6-1) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(N-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 
0 : A[1][(N-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } for (t4=max(max(max(ceild(16*t2-N+16,2),256*t3),4*t1+4),8*t1-8*t2+8);t4<=min(min(floord(512*t3-N+511,2),T-1),4*t1+7);t4++) { for (t5=-16*t1+16*t2+4*t4-15;t5<=-16*t1+16*t2+4*t4-14;t5++) { for (t6=2*t4;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=-16*t1+16*t2+4*t4-13;t5<=16*t2+15;t5++) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 
0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } } if ((N >= 3) && (t1 <= min(min(floord(16*t2-N+1,8),floord(8*t2+256*t3-N+249,8)),floord(16*t2+2*T-N-15,16))) && (t1 >= ceild(16*t2+512*t3-N-13,16))) { if ((N+1)%2 == 0) { for (t5=16*t1-16*t2+2*N+11;t5<=16*t1-16*t2+2*N+12;t5++) { for (t6=16*t1-16*t2+N+13;t6<=16*t1-16*t2+2*N+12;t6++) { A[1][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13)] = 0.111 * ( ( (-16*t1+16*t2+t5-N-13)==0 ? 0 : ( ((-16*t1+16*t2+t6-N-13)==0 ? 0 : A[0][(-16*t1+16*t2+t5-N-13) - 1][(-16*t1+16*t2+t6-N-13) - 1]) + A[0][(-16*t1+16*t2+t5-N-13) - 1][(-16*t1+16*t2+t6-N-13)] + ((-16*t1+16*t2+t6-N-13) == N-1 ? 0 : A[0][(-16*t1+16*t2+t5-N-13) - 1][(-16*t1+16*t2+t6-N-13) + 1]) ) ) + ((-16*t1+16*t2+t6-N-13)==0 ? 0 : A[0][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13) - 1]) + A[0][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13)] + ((-16*t1+16*t2+t6-N-13)==N-1 ? 0 : A[0][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13) + 1]) + ( (-16*t1+16*t2+t5-N-13)==N-1 ? 
0 : ( ((-16*t1+16*t2+t6-N-13)==0 ? 0: A[0][(-16*t1+16*t2+t5-N-13) + 1][(-16*t1+16*t2+t6-N-13) - 1]) + A[0][(-16*t1+16*t2+t5-N-13) + 1][(-16*t1+16*t2+t6-N-13)] + ((-16*t1+16*t2+t6-N-13)==N-1 ? 0 : A[0][(-16*t1+16*t2+t5-N-13) + 1][(-16*t1+16*t2+t6-N-13) + 1]) ) ) );; } } for (t6=16*t1-16*t2+N+14;t6<=16*t1-16*t2+2*N+13;t6++) { A[0][(N-1)][(-16*t1+16*t2+t6-N-14)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-16*t1+16*t2+t6-N-14)==0 ? 0 : A[1][(N-1) - 1][(-16*t1+16*t2+t6-N-14) - 1]) + A[1][(N-1) - 1][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[1][(N-1) - 1][(-16*t1+16*t2+t6-N-14) + 1]) ) ) + ((-16*t1+16*t2+t6-N-14)==0 ? 0 : A[1][(N-1)][(-16*t1+16*t2+t6-N-14) - 1]) + A[1][(N-1)][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[1][(N-1)][(-16*t1+16*t2+t6-N-14) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-16*t1+16*t2+t6-N-14)==0 ? 0: A[1][(N-1) + 1][(-16*t1+16*t2+t6-N-14) - 1]) + A[1][(N-1) + 1][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[1][(N-1) + 1][(-16*t1+16*t2+t6-N-14) + 1]) ) ) );; } } } for (t4=max(max(ceild(512*t3-N+512,2),256*t3),4*t1+4);t4<=min(min(min(floord(16*t2-N+15,2),floord(16*t1-16*t2+N+12,2)),T-1),256*t3+255);t4++) { for (t5=-16*t1+16*t2+4*t4-15;t5<=-16*t1+16*t2+4*t4-14;t5++) { for (t6=2*t4;t6<=512*t3+511;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=-16*t1+16*t2+4*t4-13;t5<=2*t4+N-1;t5++) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 
0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; for (t6=2*t4+1;t6<=512*t3+511;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } for (t6=2*t4+1;t6<=512*t3+511;t6++) { A[0][(N-1)][(-2*t4+t6-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1)][(-2*t4+t6-1) - 1]) + A[1][(N-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1)][(-2*t4+t6-1) + 1]) + ( (N-1)==N-1 ? 
0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(N-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } for (t4=max(max(max(max(ceild(16*t2-N+16,2),ceild(512*t3-N+512,2)),256*t3),4*t1+4),8*t1-8*t2+8);t4<=min(min(T-1,4*t1+7),256*t3+255);t4++) { for (t5=-16*t1+16*t2+4*t4-15;t5<=-16*t1+16*t2+4*t4-14;t5++) { for (t6=2*t4;t6<=512*t3+511;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=-16*t1+16*t2+4*t4-13;t5<=16*t2+15;t5++) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; for (t6=2*t4+1;t6<=512*t3+511;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 
0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } if ((t1 <= min(min(floord(16*t2-N+1,8),floord(16*t2+2*T-N-15,16)),floord(16*t2+512*t3-N+497,16))) && (t1 >= max(ceild(8*t2+256*t3-N+251,8),ceild(16*t2+512*t3-N-13,16)))) { if ((N+1)%2 == 0) { for (t5=16*t1-16*t2+2*N+11;t5<=16*t1-16*t2+2*N+12;t5++) { for (t6=16*t1-16*t2+N+13;t6<=512*t3+511;t6++) { A[1][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13)] = 0.111 * ( ( (-16*t1+16*t2+t5-N-13)==0 ? 0 : ( ((-16*t1+16*t2+t6-N-13)==0 ? 0 : A[0][(-16*t1+16*t2+t5-N-13) - 1][(-16*t1+16*t2+t6-N-13) - 1]) + A[0][(-16*t1+16*t2+t5-N-13) - 1][(-16*t1+16*t2+t6-N-13)] + ((-16*t1+16*t2+t6-N-13) == N-1 ? 0 : A[0][(-16*t1+16*t2+t5-N-13) - 1][(-16*t1+16*t2+t6-N-13) + 1]) ) ) + ((-16*t1+16*t2+t6-N-13)==0 ? 0 : A[0][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13) - 1]) + A[0][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13)] + ((-16*t1+16*t2+t6-N-13)==N-1 ? 0 : A[0][(-16*t1+16*t2+t5-N-13)][(-16*t1+16*t2+t6-N-13) + 1]) + ( (-16*t1+16*t2+t5-N-13)==N-1 ? 0 : ( ((-16*t1+16*t2+t6-N-13)==0 ? 0: A[0][(-16*t1+16*t2+t5-N-13) + 1][(-16*t1+16*t2+t6-N-13) - 1]) + A[0][(-16*t1+16*t2+t5-N-13) + 1][(-16*t1+16*t2+t6-N-13)] + ((-16*t1+16*t2+t6-N-13)==N-1 ? 
0 : A[0][(-16*t1+16*t2+t5-N-13) + 1][(-16*t1+16*t2+t6-N-13) + 1]) ) ) );; } } for (t6=16*t1-16*t2+N+14;t6<=512*t3+511;t6++) { A[0][(N-1)][(-16*t1+16*t2+t6-N-14)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-16*t1+16*t2+t6-N-14)==0 ? 0 : A[1][(N-1) - 1][(-16*t1+16*t2+t6-N-14) - 1]) + A[1][(N-1) - 1][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[1][(N-1) - 1][(-16*t1+16*t2+t6-N-14) + 1]) ) ) + ((-16*t1+16*t2+t6-N-14)==0 ? 0 : A[1][(N-1)][(-16*t1+16*t2+t6-N-14) - 1]) + A[1][(N-1)][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[1][(N-1)][(-16*t1+16*t2+t6-N-14) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-16*t1+16*t2+t6-N-14)==0 ? 0: A[1][(N-1) + 1][(-16*t1+16*t2+t6-N-14) - 1]) + A[1][(N-1) + 1][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[1][(N-1) + 1][(-16*t1+16*t2+t6-N-14) + 1]) ) ) );; } } } if ((N >= 2) && (t1 == 2*t2)) { for (t4=max(ceild(8*t1+N,2),256*t3);t4<=min(floord(8*t1-N+15,2),T-1);t4++) { for (t6=2*t4;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.111 * ( ( 0==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][0 - 1][(-2*t4+t6) - 1]) + A[0][0 - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][0 - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + A[0][0][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0][(-2*t4+t6) + 1]) + ( 0==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][0 + 1][(-2*t4+t6) - 1]) + A[0][0 + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0 + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=2*t4+1;t5<=2*t4+N-1;t5++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 
0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; } for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } } for (t6=2*t4+1;t6<=2*t4+N;t6++) { if (t1%2 == 0) { A[0][(N-1)][(-2*t4+t6-1)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 
0 : A[1][(N-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(N-1)][(-2*t4+t6-1) - 1]) + A[1][(N-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1)][(-2*t4+t6-1) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(N-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(N-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(N-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } if (t1 == 2*t2) { for (t4=max(max(ceild(8*t1+N,2),ceild(8*t1-N+16,2)),256*t3);t4<=min(min(floord(512*t3-N+511,2),T-1),4*t1+7);t4++) { for (t6=2*t4;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.111 * ( ( 0==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][0 - 1][(-2*t4+t6) - 1]) + A[0][0 - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][0 - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + A[0][0][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0][(-2*t4+t6) + 1]) + ( 0==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][0 + 1][(-2*t4+t6) - 1]) + A[0][0 + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0 + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=2*t4+1;t5<=8*t1+15;t5++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; } for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 
0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(N-1) + 1]) ) ) + ((N-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + A[1][(-2*t4+t5-1)][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((N-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(N-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(N-1)] + ((N-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1) + 1]) ) ) );; } } } } if (t1 == 2*t2) { for (t4=max(max(ceild(512*t3-N+512,2),256*t3),4*t1+4);t4<=min(T-1,4*t1+7);t4++) { for (t6=2*t4;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.111 * ( ( 0==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][0 - 1][(-2*t4+t6) - 1]) + A[0][0 - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][0 - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + A[0][0][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][0][(-2*t4+t6) + 1]) + ( 0==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][0 + 1][(-2*t4+t6) - 1]) + A[0][0 + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 
0 : A[0][0 + 1][(-2*t4+t6) + 1]) ) ) );; } } for (t5=2*t4+1;t5<=8*t1+15;t5++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][0] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( (0==0 ? 0 : A[0][(-2*t4+t5) - 1][0 - 1]) + A[0][(-2*t4+t5) - 1][0] + (0 == N-1 ? 0 : A[0][(-2*t4+t5) - 1][0 + 1]) ) ) + (0==0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + A[0][(-2*t4+t5)][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5)][0 + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( (0==0 ? 0: A[0][(-2*t4+t5) + 1][0 - 1]) + A[0][(-2*t4+t5) + 1][0] + (0==N-1 ? 0 : A[0][(-2*t4+t5) + 1][0 + 1]) ) ) );; } for (t6=2*t4+1;t6<=512*t3+511;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.111 * ( ( (-2*t4+t5)==0 ? 0 : ( ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) - 1][(-2*t4+t6)] + ((-2*t4+t6) == N-1 ? 0 : A[0][(-2*t4+t5) - 1][(-2*t4+t6) + 1]) ) ) + ((-2*t4+t6)==0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5)][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + ( (-2*t4+t5)==N-1 ? 0 : ( ((-2*t4+t6)==0 ? 0: A[0][(-2*t4+t5) + 1][(-2*t4+t6) - 1]) + A[0][(-2*t4+t5) + 1][(-2*t4+t6)] + ((-2*t4+t6)==N-1 ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6) + 1]) ) ) );; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.111 * ( ( (-2*t4+t5-1)==0 ? 0 : ( ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1) + 1]) ) ) + ((-2*t4+t6-1)==0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1)][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + ( (-2*t4+t5-1)==N-1 ? 0 : ( ((-2*t4+t6-1)==0 ? 0: A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) - 1]) + A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)] + ((-2*t4+t6-1)==N-1 ? 
0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1) + 1]) ) ) );; } } } } } if (t1 <= min(min(floord(16*t2-N,8),floord(16*t2+2*T-N-16,16)),floord(16*t2+512*t3-N+496,16))) { if (N%2 == 0) { for (t6=max(512*t3,16*t1-16*t2+N+14);t6<=min(512*t3+511,16*t1-16*t2+2*N+13);t6++) { A[1][(N-1)][(-16*t1+16*t2+t6-N-14)] = 0.111 * ( ( (N-1)==0 ? 0 : ( ((-16*t1+16*t2+t6-N-14)==0 ? 0 : A[0][(N-1) - 1][(-16*t1+16*t2+t6-N-14) - 1]) + A[0][(N-1) - 1][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14) == N-1 ? 0 : A[0][(N-1) - 1][(-16*t1+16*t2+t6-N-14) + 1]) ) ) + ((-16*t1+16*t2+t6-N-14)==0 ? 0 : A[0][(N-1)][(-16*t1+16*t2+t6-N-14) - 1]) + A[0][(N-1)][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[0][(N-1)][(-16*t1+16*t2+t6-N-14) + 1]) + ( (N-1)==N-1 ? 0 : ( ((-16*t1+16*t2+t6-N-14)==0 ? 0: A[0][(N-1) + 1][(-16*t1+16*t2+t6-N-14) - 1]) + A[0][(N-1) + 1][(-16*t1+16*t2+t6-N-14)] + ((-16*t1+16*t2+t6-N-14)==N-1 ? 0 : A[0][(N-1) + 1][(-16*t1+16*t2+t6-N-14) + 1]) ) ) );; } } } } } } } /* End of CLooG code */ #undef T #define T 1000 // #undef N // #define N 16000L #ifdef TIME gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); printf("%7.5lf", tdiff); //printf("|Time taken = %7.5lfs\n", tdiff ); //printf("|MFLOPS = %f\n", ((((double)NUM_FP_OPS * N *N * T) / tdiff) / 1000000L)); #endif #ifdef VERIFY for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { total+= A[T%2][i][j] ; } } printf("|sum: %e\t", total); for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { sum_err_sqr += (A[T%2][i][j] - (total/N))*(A[T%2][i][j] - (total/N)); } } printf("|rms(A) = %7.2f\t", sqrt(sum_err_sqr)); for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { chtotal += ((char *)A[T%2][i])[j]; } } printf("|sum(rep(A)) = %d\n", chtotal); #endif return 0; } // icc -O3 -fp-model precise heat_1d_np.c -o op-heat-1d-np -lm // /* @ begin PrimeTile (num_tiling_levels=1; first_depth=1; last_depth=-1; boundary_tiling_level=-1;) @*/ // /* @ begin 
PrimeRegTile (scalar_replacement=0; T1t3=8; T1t4=8; ) @*/ // /* @ end @*/
segment.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS EEEEE GGGG M M EEEEE N N TTTTT % % SS E G MM MM E NN N T % % SSS EEE G GGG M M M EEE N N N T % % SS E G G M M E N NN T % % SSSSS EEEEE GGGG M M EEEEE N N T % % % % % % MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means % % % % Software Design % % John Cristy % % April 1993 % % % % % % Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Segment segments an image by analyzing the histograms of the color % components and identifying units that are homogeneous with the fuzzy % c-means technique. The scale-space filter analyzes the histograms of % the three color components of the image and identifies a set of % classes. The extents of each class is used to coarsely segment the % image with thresholding. The color associated with each class is % determined by the mean color of all pixels within the extents of a % particular class. Finally, any unclassified pixels are assigned to % the closest class with the fuzzy c-means technique. % % The fuzzy c-Means algorithm can be summarized as follows: % % o Build a histogram, one for each color component of the image. 
% % o For each histogram, successively apply the scale-space filter and % build an interval tree of zero crossings in the second derivative % at each scale. Analyze this scale-space ``fingerprint'' to % determine which peaks and valleys in the histogram are most % predominant. % % o The fingerprint defines intervals on the axis of the histogram. % Each interval contains either a minima or a maxima in the original % signal. If each color component lies within the maxima interval, % that pixel is considered ``classified'' and is assigned an unique % class number. % % o Any pixel that fails to be classified in the above thresholding % pass is classified using the fuzzy c-Means technique. It is % assigned to one of the classes discovered in the histogram analysis % phase. % % The fuzzy c-Means technique attempts to cluster a pixel by finding % the local minima of the generalized within group sum of squared error % objective function. A pixel is assigned to the closest class of % which the fuzzy membership has a maximum value. % % Segment is strongly based on software written by Andy Gallo, % University of Delaware. % % The following reference was used in creating this program: % % Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation % Algorithm Based on the Thresholding and the Fuzzy c-Means % Techniques", Pattern Recognition, Volume 23, Number 9, pages % 935-952, 1990. % % */ #include "magick/studio.h" #include "magick/cache.h" #include "magick/color.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/segment.h" #include "magick/string_.h" /* Define declarations. 
*/
#define MaxDimension 3
#define DeltaTau 0.5f
#if defined(FastClassify)
#define WeightingExponent 2.0
#define SegmentPower(ratio) (ratio)
#else
#define WeightingExponent 2.5
/*
  NOTE(review): the trailing semicolon in this expansion means SegmentPower()
  may only appear where a complete statement is legal -- confirm every call
  site uses it that way.  The free identifier `weighting_exponent` is resolved
  at each expansion site, not here.
*/
#define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0)));
#endif
#define Tau 5.2f

/*
  Typedef declarations.
*/
/*
  One interval ("extent") on a single color-component histogram axis: the
  inclusive [left,right] bounds of a peak region, plus a running accumulator
  (`center`) that Classify() sums classified component values into and later
  divides by the cluster's pixel count to obtain the mean component value.
*/
typedef struct _ExtentPacket
{
  MagickRealType
    center;      /* sum of classified component values; becomes the mean */

  ssize_t
    index,       /* scan cursor used by DefineRegion(); reset to 0 per pass */
    left,        /* inclusive lower histogram bound of the interval */
    right;       /* inclusive upper histogram bound of the interval */
} ExtentPacket;

/*
  One candidate class: the red/green/blue histogram intervals that define it,
  the number of pixels that fell inside it, and its assigned colormap id.
  Clusters form a singly-linked list built and pruned by Classify().
*/
typedef struct _Cluster
{
  struct _Cluster
    *next;       /* next cluster in the list; NULL terminates */

  ExtentPacket
    red,
    green,
    blue;

  ssize_t
    count,       /* number of pixels classified into this cluster */
    id;          /* colormap index assigned after threshold pruning */
} Cluster;

/*
  Node of the interval tree of zero crossings built during scale-space
  analysis of the histograms (see the file header and OptimalTau()).  Each
  node records a histogram interval [left,right] at smoothing scale tau
  together with its stability measures.  Not exercised in this portion of
  the file -- field semantics per the scale-space analysis code.
*/
typedef struct _IntervalTree
{
  MagickRealType
    tau;         /* smoothing scale at which this interval exists */

  ssize_t
    left,
    right;

  MagickRealType
    mean_stability,
    stability;

  struct _IntervalTree
    *sibling,
    *child;
} IntervalTree;

/*
  A histogram smoothed at scale tau, together with the sign changes (zero
  crossings) of its second derivative; one slot per possible 8-bit component
  value.
*/
typedef struct _ZeroCrossing
{
  MagickRealType
    tau,
    histogram[256];

  short
    crossings[256];
} ZeroCrossing;

/*
  Constant declarations.
*/
static const int
  Blue = 2,         /* indices into the per-component extrema arrays */
  Green = 1,
  Red = 0,
  SafeMargin = 3,   /* slack added to interval bounds when matching pixels */
  TreeLength = 600; /* presumably the interval-tree node pool size -- TODO
                       confirm against the tree-construction code */

/*
  Method prototypes.
*/
static MagickRealType
  OptimalTau(const ssize_t *,const double,const double,const double,
    const double,short *);

static ssize_t
  DefineRegion(const short *,ExtentPacket *);

static void
  InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *),
  ScaleSpace(const ssize_t *,const MagickRealType,MagickRealType *),
  ZeroCrossHistogram(MagickRealType *,const MagickRealType,short *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l a s s i f y                                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Classify() defines one or more classes.  Each pixel is thresholded to
%  determine which class it belongs to.  If the class is not identified it is
%  assigned to the closest class based on the fuzzy c-Means technique.
%
%  The format of the Classify method is:
%
%      MagickBooleanType Classify(Image *image,short **extrema,
%        const MagickRealType cluster_threshold,
%        const MagickRealType weighting_exponent,
%        const MagickBooleanType verbose)
%
%  A description of each parameter follows.
% % o image: the image. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % % o cluster_threshold: This MagickRealType represents the minimum number of % pixels contained in a hexahedra before it can be considered valid % (expressed as a percentage). % % o weighting_exponent: Specifies the membership weighting exponent. % % o verbose: A value greater than zero prints detailed information about % the identified classes. % */ static MagickBooleanType Classify(Image *image,short **extrema, const MagickRealType cluster_threshold, const MagickRealType weighting_exponent,const MagickBooleanType verbose) { #define SegmentImageTag "Segment/Image" CacheView *image_view; Cluster *cluster, *head, *last_cluster, *next_cluster; ExceptionInfo *exception; ExtentPacket blue, green, red; MagickOffsetType progress; MagickRealType *free_squares; MagickStatusType status; register ssize_t i; register MagickRealType *squares; size_t number_clusters; ssize_t count, y; /* Form clusters. */ cluster=(Cluster *) NULL; head=(Cluster *) NULL; (void) ResetMagickMemory(&red,0,sizeof(red)); (void) ResetMagickMemory(&green,0,sizeof(green)); (void) ResetMagickMemory(&blue,0,sizeof(blue)); while (DefineRegion(extrema[Red],&red) != 0) { green.index=0; while (DefineRegion(extrema[Green],&green) != 0) { blue.index=0; while (DefineRegion(extrema[Blue],&blue) != 0) { /* Allocate a new class. */ if (head != (Cluster *) NULL) { cluster->next=(Cluster *) AcquireMagickMemory( sizeof(*cluster->next)); cluster=cluster->next; } else { cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); head=cluster; } if (cluster == (Cluster *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Initialize a new class. 
*/ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; } } } if (head == (Cluster *) NULL) { /* No classes were identified-- create one. */ cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); if (cluster == (Cluster *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; head=cluster; } /* Count the pixels for each cluster. */ status=MagickTrue; count=0; progress=0; exception=(&image->exception); image_view=AcquireCacheView(image); for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) if (((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) >= (cluster->red.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) <= (cluster->red.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) >= (cluster->green.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) <= (cluster->green.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) >= (cluster->blue.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) <= (cluster->blue.right+SafeMargin))) { /* Count this pixel. 
*/ count++; cluster->red.center+=(MagickRealType) ScaleQuantumToChar(GetPixelRed(p)); cluster->green.center+=(MagickRealType) ScaleQuantumToChar(GetPixelGreen(p)); cluster->blue.center+=(MagickRealType) ScaleQuantumToChar(GetPixelBlue(p)); cluster->count++; break; } p++; } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_Classify) #endif proceed=SetImageProgress(image,SegmentImageTag,progress++, 2*image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); /* Remove clusters that do not meet minimum cluster threshold. */ count=0; last_cluster=head; next_cluster=head; for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; if ((cluster->count > 0) && (cluster->count >= (count*cluster_threshold/100.0))) { /* Initialize cluster. */ cluster->id=count; cluster->red.center/=cluster->count; cluster->green.center/=cluster->count; cluster->blue.center/=cluster->count; count++; last_cluster=cluster; continue; } /* Delete cluster. */ if (cluster == head) head=next_cluster; else last_cluster->next=next_cluster; cluster=(Cluster *) RelinquishMagickMemory(cluster); } number_clusters=(size_t) count; if (verbose != MagickFalse) { /* Print cluster statistics. */ (void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n"); (void) FormatLocaleFile(stdout,"===================\n\n"); (void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double) cluster_threshold); (void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double) weighting_exponent); (void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n", (double) number_clusters); /* Print the total number of points per cluster. 
*/ (void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n"); (void) FormatLocaleFile(stdout,"=============================\n\n"); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) (void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double) cluster->id,(double) cluster->count); /* Print the cluster extents. */ (void) FormatLocaleFile(stdout, "\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension); (void) FormatLocaleFile(stdout,"================"); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double) cluster->id); (void) FormatLocaleFile(stdout, "%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double) cluster->red.left,(double) cluster->red.right,(double) cluster->green.left,(double) cluster->green.right,(double) cluster->blue.left,(double) cluster->blue.right); } /* Print the cluster center values. */ (void) FormatLocaleFile(stdout, "\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension); (void) FormatLocaleFile(stdout,"====================="); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double) cluster->id); (void) FormatLocaleFile(stdout,"%g %g %g\n",(double) cluster->red.center,(double) cluster->green.center,(double) cluster->blue.center); } (void) FormatLocaleFile(stdout,"\n"); } if (number_clusters > 256) ThrowBinaryException(ImageError,"TooManyClusters",image->filename); /* Speed up distance calculations. */ squares=(MagickRealType *) AcquireQuantumMemory(513UL,sizeof(*squares)); if (squares == (MagickRealType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); squares+=255; for (i=(-255); i <= 255; i++) squares[i]=(MagickRealType) i*(MagickRealType) i; /* Allocate image colormap. 
*/ if (AcquireImageColormap(image,number_clusters) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); i=0; for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { image->colormap[i].red=ScaleCharToQuantum((unsigned char) (cluster->red.center+0.5)); image->colormap[i].green=ScaleCharToQuantum((unsigned char) (cluster->green.center+0.5)); image->colormap[i].blue=ScaleCharToQuantum((unsigned char) (cluster->blue.center+0.5)); i++; } /* Do course grain classes. */ exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { Cluster *cluster; register const PixelPacket *restrict p; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { SetPixelIndex(indexes+x,0); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { if (((ssize_t) ScaleQuantumToChar(q->red) >= (cluster->red.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(q->red) <= (cluster->red.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(q->green) >= (cluster->green.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(q->green) <= (cluster->green.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(q->blue) >= (cluster->blue.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(q->blue) <= (cluster->blue.right+SafeMargin))) { /* Classify this pixel. */ SetPixelIndex(indexes+x,cluster->id); break; } } if (cluster == (Cluster *) NULL) { MagickRealType distance_squared, local_minima, numerator, ratio, sum; register ssize_t j, k; /* Compute fuzzy membership. 
*/ local_minima=0.0; for (j=0; j < (ssize_t) image->colors; j++) { sum=0.0; p=image->colormap+j; distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)- (ssize_t) ScaleQuantumToChar(GetPixelRed(p))]+ squares[(ssize_t) ScaleQuantumToChar(q->green)- (ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]+ squares[(ssize_t) ScaleQuantumToChar(q->blue)- (ssize_t) ScaleQuantumToChar(GetPixelBlue(p))]; numerator=distance_squared; for (k=0; k < (ssize_t) image->colors; k++) { p=image->colormap+k; distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)- (ssize_t) ScaleQuantumToChar(GetPixelRed(p))]+ squares[(ssize_t) ScaleQuantumToChar(q->green)- (ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]+ squares[(ssize_t) ScaleQuantumToChar(q->blue)- (ssize_t) ScaleQuantumToChar(GetPixelBlue(p))]; ratio=numerator/distance_squared; sum+=SegmentPower(ratio); } if ((sum != 0.0) && ((1.0/sum) > local_minima)) { /* Classify this pixel. */ local_minima=1.0/sum; SetPixelIndex(indexes+x,j); } } } q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_Classify) #endif proceed=SetImageProgress(image,SegmentImageTag,progress++, 2*image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); status&=SyncImage(image); /* Relinquish resources. 
*/ for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; cluster=(Cluster *) RelinquishMagickMemory(cluster); } squares-=255; free_squares=squares; free_squares=(MagickRealType *) RelinquishMagickMemory(free_squares); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n s o l i d a t e C r o s s i n g s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConsolidateCrossings() guarantees that an even number of zero crossings % always lie between two crossings. % % The format of the ConsolidateCrossings method is: % % ConsolidateCrossings(ZeroCrossing *zero_crossing, % const size_t number_crossings) % % A description of each parameter follows. % % o zero_crossing: Specifies an array of structures of type ZeroCrossing. % % o number_crossings: This size_t specifies the number of elements % in the zero_crossing array. % */ static inline ssize_t MagickAbsoluteValue(const ssize_t x) { if (x < 0) return(-x); return(x); } static inline ssize_t MagickMax(const ssize_t x,const ssize_t y) { if (x > y) return(x); return(y); } static inline ssize_t MagickMin(const ssize_t x,const ssize_t y) { if (x < y) return(x); return(y); } static void ConsolidateCrossings(ZeroCrossing *zero_crossing, const size_t number_crossings) { register ssize_t i, j, k, l; ssize_t center, correct, count, left, right; /* Consolidate zero crossings. */ for (i=(ssize_t) number_crossings-1; i >= 0; i--) for (j=0; j <= 255; j++) { if (zero_crossing[i].crossings[j] == 0) continue; /* Find the entry that is closest to j and still preserves the property that there are an even number of crossings between intervals. 
*/ for (k=j-1; k > 0; k--) if (zero_crossing[i+1].crossings[k] != 0) break; left=MagickMax(k,0); center=j; for (k=j+1; k < 255; k++) if (zero_crossing[i+1].crossings[k] != 0) break; right=MagickMin(k,255); /* K is the zero crossing just left of j. */ for (k=j-1; k > 0; k--) if (zero_crossing[i].crossings[k] != 0) break; if (k < 0) k=0; /* Check center for an even number of crossings between k and j. */ correct=(-1); if (zero_crossing[i+1].crossings[j] != 0) { count=0; for (l=k+1; l < center; l++) if (zero_crossing[i+1].crossings[l] != 0) count++; if (((count % 2) == 0) && (center != k)) correct=center; } /* Check left for an even number of crossings between k and j. */ if (correct == -1) { count=0; for (l=k+1; l < left; l++) if (zero_crossing[i+1].crossings[l] != 0) count++; if (((count % 2) == 0) && (left != k)) correct=left; } /* Check right for an even number of crossings between k and j. */ if (correct == -1) { count=0; for (l=k+1; l < right; l++) if (zero_crossing[i+1].crossings[l] != 0) count++; if (((count % 2) == 0) && (right != k)) correct=right; } l=(ssize_t) zero_crossing[i].crossings[j]; zero_crossing[i].crossings[j]=0; if (correct != -1) zero_crossing[i].crossings[correct]=(short) l; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e f i n e R e g i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DefineRegion() defines the left and right boundaries of a peak region. % % The format of the DefineRegion method is: % % ssize_t DefineRegion(const short *extrema,ExtentPacket *extents) % % A description of each parameter follows. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % % o extents: This pointer to an ExtentPacket represent the extends % of a particular peak or valley of a color component. 
%
*/

/*
  DefineRegion() defines the left and right boundaries of the next peak
  region in `extrema` (the +/- peak/valley map produced by OptimalTau),
  scanning forward from extents->index.  Returns MagickFalse when no
  further region exists.
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
  /*
    Initialize to default values.
  */
  extents->left=0;
  extents->center=0.0;
  extents->right=255;
  /*
    Find the left side (maxima).
  */
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] > 0)
      break;
  if (extents->index > 255)
    return(MagickFalse);  /* no left side - no region exists */
  extents->left=extents->index;
  /*
    Find the right side (minima).
  */
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] < 0)
      break;
  extents->right=extents->index-1;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   D e r i v a t i v e H i s t o g r a m                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DerivativeHistogram() determines the derivative of the histogram using
%  central differencing.
%
%  The format of the DerivativeHistogram method is:
%
%      DerivativeHistogram(const MagickRealType *histogram,
%        MagickRealType *derivative)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of MagickRealTypes representing the number
%      of pixels for each intensity of a particular color component.
%
%    o derivative: This array of MagickRealTypes is initialized by
%      DerivativeHistogram to the derivative of the histogram using central
%      differencing.  Both arrays must hold 256 entries.
%
*/
static void DerivativeHistogram(const MagickRealType *histogram,
  MagickRealType *derivative)
{
  register ssize_t
    i,
    n;

  /*
    Compute endpoints using second order polynomial interpolation.
  */
  n=255;
  derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
  derivative[n]=(0.5*histogram[n-2]-2.0*histogram[n-1]+1.5*histogram[n]);
  /*
    Compute derivative using central differencing.
  */
  for (i=1; i < n; i++)
    derivative[i]=(histogram[i+1]-histogram[i-1])/2.0;
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+  G e t I m a g e D y n a m i c T h r e s h o l d                            %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageDynamicThreshold() returns the dynamic threshold for an image.
%
%  The format of the GetImageDynamicThreshold method is:
%
%      MagickBooleanType GetImageDynamicThreshold(const Image *image,
%        const double cluster_threshold,const double smooth_threshold,
%        MagickPixelPacket *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cluster_threshold: This MagickRealType represents the minimum number of
%      pixels contained in a hexahedra before it can be considered valid
%      (expressed as a percentage).
%
%    o smooth_threshold: the smoothing threshold eliminates noise in the second
%      derivative of the histogram.  As the value is increased, you can expect a
%      smoother second derivative.
%
%    o pixel: return the dynamic threshold here.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image,
  const double cluster_threshold,const double smooth_threshold,
  MagickPixelPacket *pixel,ExceptionInfo *exception)
{
  Cluster
    *background,
    *cluster,
    *object,
    *head,
    *last_cluster,
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickBooleanType
    proceed;

  MagickRealType
    threshold;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  short
    *extrema[MaxDimension];

  ssize_t
    count,
    *histogram[MaxDimension],
    y;

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetMagickPixelPacket(image,pixel);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    /* NOTE(review): element size uses sizeof(**histogram) (ssize_t) for a
       short array -- over-allocates; SegmentImage() uses sizeof(**extrema).
       Harmless but inconsistent; confirm before changing. */
    extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
        return(MagickFalse);
      }
  }
  /*
    Initialize histogram.
  */
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]);
  /*
    Form clusters: one per (red,green,blue) peak-region combination.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) ResetMagickMemory(&red,0,sizeof(red));
  (void) ResetMagickMemory(&green,0,sizeof(green));
  (void) ResetMagickMemory(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          {
            /* NOTE(review): error return leaks `head` list, histogram and
               extrema arrays. */
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              image->filename);
            return(MagickFalse);
          }
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            image->filename);
          return(MagickFalse);
        }
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster.
  */
  count=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelRed(p));
            cluster->green.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelGreen(p));
            cluster->blue.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelBlue(p));
            cluster->count++;
            break;
          }
      p++;
    }
    proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y,
      2*image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Remove clusters that do not meet minimum cluster threshold.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  /* NOTE(review): if every cluster was removed above, head is NULL and the
     background/object dereferences below are undefined -- verify this cannot
     happen (the "no classes" branch guarantees at least one cluster, but it
     can still be deleted when its count is 0). */
  object=head;
  background=head;
  if (count > 1)
    {
      /*
        Pick the smallest cluster as the object and the largest as the
        background (both scans start at head->next).
      */
      object=head->next;
      for (cluster=object; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count < object->count)
          object=cluster;
        cluster=cluster->next;
      }
      background=head->next;
      for (cluster=background; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count > background->count)
          background=cluster;
        cluster=cluster->next;
      }
    }
  /*
    The threshold for each channel is the midpoint between the background
    and object cluster centers.
  */
  threshold=(background->red.center+object->red.center)/2.0;
  pixel->red=(MagickRealType) ScaleCharToQuantum((unsigned char)
    (threshold+0.5));
  threshold=(background->green.center+object->green.center)/2.0;
  pixel->green=(MagickRealType) ScaleCharToQuantum((unsigned char)
    (threshold+0.5));
  threshold=(background->blue.center+object->blue.center)/2.0;
  pixel->blue=(MagickRealType) ScaleCharToQuantum((unsigned char)
    (threshold+0.5));
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+  I n i t i a l i z e H i s t o g r a m                                      %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InitializeHistogram() computes the histogram for an image.
%
%  The format of the InitializeHistogram method is:
%
%      InitializeHistogram(const Image *image,ssize_t **histogram)
%
%  A description of each parameter follows.
%
%    o image: Specifies a pointer to an Image structure;  returned from
%      ReadImage.
%
%    o histogram: Specifies an array of integers representing the number
%      of pixels for each intensity of a particular color component.
%
*/
static void InitializeHistogram(const Image *image,ssize_t **histogram,
  ExceptionInfo *exception)
{
  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Initialize histogram.
  */
  for (i=0; i <= 255; i++)
  {
    histogram[Red][i]=0;
    histogram[Green][i]=0;
    histogram[Blue][i]=0;
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;  /* pixel cache failure: return a partial histogram */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(p))]++;
      histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]++;
      histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(p))]++;
      p++;
    }
  }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+  I n i t i a l i z e I n t e r v a l T r e e                                %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InitializeIntervalTree() initializes an interval tree from the lists of
%  zero crossings.
%
%  The format of the InitializeIntervalTree method is:
%
%      InitializeIntervalTree(IntervalTree **list,ssize_t *number_nodes,
%        IntervalTree *node)
%
%  A description of each parameter follows.
%
%    o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
%    o number_crossings: This size_t specifies the number of elements
%      in the zero_crossing array.
% */ static void InitializeList(IntervalTree **list,ssize_t *number_nodes, IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->child == (IntervalTree *) NULL) list[(*number_nodes)++]=node; InitializeList(list,number_nodes,node->sibling); InitializeList(list,number_nodes,node->child); } static void MeanStability(IntervalTree *node) { register IntervalTree *child; if (node == (IntervalTree *) NULL) return; node->mean_stability=0.0; child=node->child; if (child != (IntervalTree *) NULL) { register ssize_t count; register MagickRealType sum; sum=0.0; count=0; for ( ; child != (IntervalTree *) NULL; child=child->sibling) { sum+=child->stability; count++; } node->mean_stability=sum/(MagickRealType) count; } MeanStability(node->sibling); MeanStability(node->child); } static void Stability(IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->child == (IntervalTree *) NULL) node->stability=0.0; else node->stability=node->tau-(node->child)->tau; Stability(node->sibling); Stability(node->child); } static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing, const size_t number_crossings) { IntervalTree *head, **list, *node, *root; register ssize_t i; ssize_t j, k, left, number_nodes; /* Allocate interval tree. */ list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength, sizeof(*list)); if (list == (IntervalTree **) NULL) return((IntervalTree *) NULL); /* The root is the entire histogram. */ root=(IntervalTree *) AcquireMagickMemory(sizeof(*root)); root->child=(IntervalTree *) NULL; root->sibling=(IntervalTree *) NULL; root->tau=0.0; root->left=0; root->right=255; for (i=(-1); i < (ssize_t) number_crossings; i++) { /* Initialize list with all nodes with no children. */ number_nodes=0; InitializeList(list,&number_nodes,root); /* Split list. 
*/ for (j=0; j < number_nodes; j++) { head=list[j]; left=head->left; node=head; for (k=head->left+1; k < head->right; k++) { if (zero_crossing[i+1].crossings[k] != 0) { if (node == head) { node->child=(IntervalTree *) AcquireMagickMemory( sizeof(*node->child)); node=node->child; } else { node->sibling=(IntervalTree *) AcquireMagickMemory( sizeof(*node->sibling)); node=node->sibling; } node->tau=zero_crossing[i+1].tau; node->child=(IntervalTree *) NULL; node->sibling=(IntervalTree *) NULL; node->left=left; node->right=k; left=k; } } if (left != head->left) { node->sibling=(IntervalTree *) AcquireMagickMemory( sizeof(*node->sibling)); node=node->sibling; node->tau=zero_crossing[i+1].tau; node->child=(IntervalTree *) NULL; node->sibling=(IntervalTree *) NULL; node->left=left; node->right=head->right; } } } /* Determine the stability: difference between a nodes tau and its child. */ Stability(root->child); MeanStability(root->child); list=(IntervalTree **) RelinquishMagickMemory(list); return(root); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + O p t i m a l T a u % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OptimalTau() finds the optimal tau for each band of the histogram. % % The format of the OptimalTau method is: % % MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau, % const double min_tau,const double delta_tau, % const double smooth_threshold,short *extrema) % % A description of each parameter follows. % % o histogram: Specifies an array of integers representing the number % of pixels for each intensity of a particular color component. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. 
%
*/

/*
  ActiveNodes() collects the "active" nodes of the interval tree: a node
  whose stability is at least the mean stability of its children is appended
  to `list` and its subtree is pruned; otherwise the search descends into its
  children.  Siblings are always scanned.
*/
static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->stability >= node->mean_stability)
    {
      list[(*number_nodes)++]=node;
      ActiveNodes(list,number_nodes,node->sibling);
    }
  else
    {
      ActiveNodes(list,number_nodes,node->sibling);
      ActiveNodes(list,number_nodes,node->child);
    }
}

/*
  FreeNodes() releases an entire interval tree (siblings and children) with a
  post-order traversal.
*/
static void FreeNodes(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  FreeNodes(node->sibling);
  FreeNodes(node->child);
  node=(IntervalTree *) RelinquishMagickMemory(node);
}

/*
  OptimalTau() finds the optimal tau for each band of the histogram: it
  builds a scale-space stack of smoothed histograms (tau from max_tau down
  to min_tau plus the raw histogram), extracts its zero crossings, builds an
  interval tree, marks peak/valley extents in `extrema`, and returns the
  average tau of the active nodes.
*/
static MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau,
  const double min_tau,const double delta_tau,const double smooth_threshold,
  short *extrema)
{
  IntervalTree
    **list,
    *node,
    *root;

  MagickBooleanType
    peak;

  MagickRealType
    average_tau,
    *derivative,
    *second_derivative,
    tau,
    value;

  register ssize_t
    i,
    x;

  size_t
    count,
    number_crossings;

  ssize_t
    index,
    j,
    k,
    number_nodes;

  ZeroCrossing
    *zero_crossing;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return(0.0);
  /*
    Allocate zero crossing list.
  */
  count=(size_t) ((max_tau-min_tau)/delta_tau)+2;
  zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count,
    sizeof(*zero_crossing));
  if (zero_crossing == (ZeroCrossing *) NULL)
    return(0.0);  /* NOTE(review): leaks `list` on this path */
  for (i=0; i < (ssize_t) count; i++)
    zero_crossing[i].tau=(-1.0);
  /*
    Initialize zero crossing list.
  */
  derivative=(MagickRealType *) AcquireQuantumMemory(256,sizeof(*derivative));
  second_derivative=(MagickRealType *) AcquireQuantumMemory(256,
    sizeof(*second_derivative));
  if ((derivative == (MagickRealType *) NULL) ||
      (second_derivative == (MagickRealType *) NULL))
    ThrowFatalException(ResourceLimitFatalError,
      "UnableToAllocateDerivatives");
  i=0;
  for (tau=max_tau; tau >= min_tau; tau-=delta_tau)
  {
    /* One scale-space slot per tau: smoothed histogram, then the zero
       crossings of its second derivative. */
    zero_crossing[i].tau=tau;
    ScaleSpace(histogram,tau,zero_crossing[i].histogram);
    DerivativeHistogram(zero_crossing[i].histogram,derivative);
    DerivativeHistogram(derivative,second_derivative);
    ZeroCrossHistogram(second_derivative,smooth_threshold,
      zero_crossing[i].crossings);
    i++;
  }
  /*
    Add an entry for the original histogram.
  */
  zero_crossing[i].tau=0.0;
  for (j=0; j <= 255; j++)
    zero_crossing[i].histogram[j]=(MagickRealType) histogram[j];
  DerivativeHistogram(zero_crossing[i].histogram,derivative);
  DerivativeHistogram(derivative,second_derivative);
  ZeroCrossHistogram(second_derivative,smooth_threshold,
    zero_crossing[i].crossings);
  number_crossings=(size_t) i;
  derivative=(MagickRealType *) RelinquishMagickMemory(derivative);
  second_derivative=(MagickRealType *)
    RelinquishMagickMemory(second_derivative);
  /*
    Ensure the scale-space fingerprints form lines in scale-space, not loops.
  */
  ConsolidateCrossings(zero_crossing,number_crossings);
  /*
    Force endpoints to be included in the interval.
  */
  for (i=0; i <= (ssize_t) number_crossings; i++)
  {
    for (j=0; j < 255; j++)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]);
    for (j=255; j > 0; j--)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]);
  }
  /*
    Initialize interval tree.
  */
  root=InitializeIntervalTree(zero_crossing,number_crossings);
  if (root == (IntervalTree *) NULL)
    return(0.0);  /* NOTE(review): leaks `list` and `zero_crossing` here */
  /*
    Find active nodes:  stability is greater (or equal) to the mean stability
    of its children.
  */
  number_nodes=0;
  ActiveNodes(list,&number_nodes,root->child);
  /*
    Initialize extrema.
  */
  for (i=0; i <= 255; i++)
    extrema[i]=0;
  for (i=0; i < number_nodes; i++)
  {
    /*
      Find this tau in zero crossings list.
    */
    k=0;
    node=list[i];
    for (j=0; j <= (ssize_t) number_crossings; j++)
      if (zero_crossing[j].tau == node->tau)
        k=j;
    /*
      Find the value of the peak.  A -1 crossing at the right edge marks a
      peak (maximum) region; otherwise the interval holds a valley.
    */
    peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue :
      MagickFalse;
    index=node->left;
    value=zero_crossing[k].histogram[index];
    for (x=node->left; x <= node->right; x++)
    {
      if (peak != MagickFalse)
        {
          if (zero_crossing[k].histogram[x] > value)
            {
              value=zero_crossing[k].histogram[x];
              index=x;
            }
        }
      else
        if (zero_crossing[k].histogram[x] < value)
          {
            value=zero_crossing[k].histogram[x];
            index=x;
          }
    }
    /*
      Mark the whole interval: positive index for a peak, negative for a
      valley; index 0 is encoded as 256 so the sign survives.
    */
    for (x=node->left; x <= node->right; x++)
    {
      if (index == 0)
        index=256;
      if (peak != MagickFalse)
        extrema[x]=(short) index;
      else
        extrema[x]=(short) (-index);
    }
  }
  /*
    Determine the average tau.
  */
  average_tau=0.0;
  for (i=0; i < number_nodes; i++)
    average_tau+=list[i]->tau;
  /* NOTE(review): division by zero if number_nodes == 0 -- confirm the tree
     always yields at least one active node. */
  average_tau/=(MagickRealType) number_nodes;
  /*
    Relinquish resources.
  */
  FreeNodes(root);
  zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(average_tau);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+  S c a l e S p a c e                                                        %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleSpace() performs a scale-space filter on the 1D histogram.
%
%  The format of the ScaleSpace method is:
%
%      ScaleSpace(const ssize_t *histogram,const MagickRealType tau,
%        MagickRealType *scale_histogram)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of MagickRealTypes representing the number
%      of pixels for each intensity of a particular color component.
%
*/

/*
  ScaleSpace() convolves the 256-bin histogram with a Gaussian of standard
  deviation `tau`, writing the smoothed result to scale_histogram.
*/
static void ScaleSpace(const ssize_t *histogram,const MagickRealType tau,
  MagickRealType *scale_histogram)
{
  MagickRealType
    alpha,
    beta,
    *gamma,
    sum;

  register ssize_t
    u,
    x;

  gamma=(MagickRealType *) AcquireQuantumMemory(256,sizeof(*gamma));
  if (gamma == (MagickRealType *) NULL)
    ThrowFatalException(ResourceLimitFatalError,
      "UnableToAllocateGammaMap");
  alpha=1.0/(tau*sqrt(2.0*MagickPI));
  beta=(-1.0/(2.0*tau*tau));
  /*
    Precompute the Gaussian kernel; entries beyond the first value smaller
    than MagickEpsilon stay 0 (the break below leaves the earlier zero-fill).
  */
  for (x=0; x <= 255; x++)
    gamma[x]=0.0;
  for (x=0; x <= 255; x++)
  {
    gamma[x]=exp((double) beta*x*x);
    if (gamma[x] < MagickEpsilon)
      break;
  }
  for (x=0; x <= 255; x++)
  {
    sum=0.0;
    for (u=0; u <= 255; u++)
      sum+=(MagickRealType) histogram[u]*gamma[MagickAbsoluteValue(x-u)];
    scale_histogram[x]=alpha*sum;
  }
  gamma=(MagickRealType *) RelinquishMagickMemory(gamma);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%  S e g m e n t I m a g e                                                    %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SegmentImage() segment an image by analyzing the histograms of the color
%  components and identifying units that are homogeneous with the fuzzy
%  C-means technique.
%
%  The format of the SegmentImage method is:
%
%      MagickBooleanType SegmentImage(Image *image,
%        const ColorspaceType colorspace,const MagickBooleanType verbose,
%        const double cluster_threshold,const double smooth_threshold)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o colorspace: Indicate the colorspace.
%
%    o verbose:  Set to MagickTrue to print detailed information about the
%      identified classes.
%
%    o cluster_threshold:  This represents the minimum number of pixels
%      contained in a hexahedra before it can be considered valid (expressed
%      as a percentage).
%
%    o smooth_threshold: the smoothing threshold eliminates noise in the second
%      derivative of the histogram.  As the value is increased, you can expect a
%      smoother second derivative.
%
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold)
{
  MagickBooleanType
    status;

  register ssize_t
    i;

  short
    *extrema[MaxDimension];

  ssize_t
    *histogram[MaxDimension];

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  if (IsRGBColorspace(colorspace) == MagickFalse)
    (void) TransformImageColorspace(image,colorspace);
  /*
    Initialize histogram.
  */
  InitializeHistogram(image,histogram,&image->exception);
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify using the fuzzy c-Means technique.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose);
  /* NOTE(review): this "restore" transform targets the same `colorspace`
     as the forward transform above, so it is a no-op after the first call;
     other releases restore an RGB colorspace here -- verify intent. */
  if (IsRGBColorspace(colorspace) == MagickFalse)
    (void) TransformImageColorspace(image,colorspace);
  /*
    Relinquish resources.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+  Z e r o C r o s s H i s t o g r a m                                        %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroCrossHistogram() find the zero crossings in a histogram and marks
%  directions as:  1 is negative to positive; 0 is zero crossing; and -1
%  is positive to negative.
%
%  The format of the ZeroCrossHistogram method is:
%
%      ZeroCrossHistogram(MagickRealType *second_derivative,
%        const MagickRealType smooth_threshold,short *crossings)
%
%  A description of each parameter follows.
%
%    o second_derivative: Specifies an array of MagickRealTypes representing the
%      second derivative of the histogram of a particular color component.
%
%    o crossings:  This array of integers is initialized with
%      -1, 0, or 1 representing the slope of the first derivative of the
%      of a particular color component.
%
*/
static void ZeroCrossHistogram(MagickRealType *second_derivative,
  const MagickRealType smooth_threshold,short *crossings)
{
  register ssize_t
    i;

  ssize_t
    parity;

  /*
    Merge low numbers to zero to help prevent noise.
    Note: this clamps second_derivative in place (caller's buffer).
  */
  for (i=0; i <= 255; i++)
    if ((second_derivative[i] < smooth_threshold) &&
        (second_derivative[i] >= -smooth_threshold))
      second_derivative[i]=0.0;
  /*
    Mark zero crossings: `parity` tracks the sign of the last nonzero value,
    so a crossing is recorded only where the sign actually flips.
  */
  parity=0;
  for (i=0; i <= 255; i++)
  {
    crossings[i]=0;
    if (second_derivative[i] < 0.0)
      {
        if (parity > 0)
          crossings[i]=(-1);
        parity=1;
      }
    else
      if (second_derivative[i] > 0.0)
        {
          if (parity < 0)
            crossings[i]=1;
          parity=(-1);
        }
  }
}
/* GB_unaryop__lnot_uint16_uint8.c */
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_uint16_uint8
// op(A') function:  GB_tran__lnot_uint16_uint8

// C type:   uint16_t
// A type:   uint8_t
// cast:     uint16_t cij = (uint16_t) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (logical NOT: nonzero -> 0, zero -> 1)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x) \
    uint16_t z = (uint16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT16 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise apply: Cx [p] = !(Ax [p] != 0), cast to uint16_t, parallelized
// with a static OpenMP schedule over the anz entries.
GrB_Info GB_unop__lnot_uint16_uint8
(
    uint16_t *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Fused transpose+apply; the loop body lives in GB_unaryop_transpose.c and is
// specialized via the GB_* macros defined above.
GrB_Info GB_tran__lnot_uint16_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__round_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__round_fc32_fc32
// op(A') function:  GB_unop_tran__round_fc32_fc32

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = GB_croundf (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (round real and imaginary parts of a float complex value)
#define GB_OP(z, x) \
    z = GB_croundf (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC32_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ;            \
    Cx [pC] = GB_croundf (z) ;      \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ROUND || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise apply with two cases: dense/sparse (Ab == NULL, every entry is
// live) and bitmap (Ab [p] selects live entries; dead slots are skipped).
GrB_Info GB_unop_apply__round_fc32_fc32
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no cast degenerates to a parallel memcpy
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_croundf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_croundf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Fused transpose+apply; body in GB_unop_transpose.c, specialized by the
// GB_* macros above.
GrB_Info GB_unop_tran__round_fc32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__asinh_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__asinh_fc32_fc32)
// op(A') function:  GB (_unop_tran__asinh_fc32_fc32)

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = casinhf (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (complex inverse hyperbolic sine, single precision)
#define GB_OP(z, x) \
    z = casinhf (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC32_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ;            \
    Cx [pC] = casinhf (z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ASINH || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise apply with two cases: dense/sparse (Ab == NULL) and bitmap
// (Ab [p] selects live entries; dead slots are skipped).
GrB_Info GB (_unop_apply__asinh_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = casinhf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = casinhf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Fused transpose+apply; body in GB_unop_transpose.c, specialized by the
// GB_* macros above.
GrB_Info GB (_unop_tran__asinh_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
target-14.c
#include <omp.h>
#include <stdlib.h>

/*
  OpenMP offloading test: allocate device memory with omp_target_alloc,
  fill it from one target region via is_device_ptr, then verify the
  contents from a second target region.  Falls back to the initial
  (host) device when no usable default device exists.  Aborts on
  data mismatch; exits 0 on success or when allocation is unavailable.
*/
int
main ()
{
  int d = omp_get_default_device ();
  int id = omp_get_initial_device ();
  int err;
  void *p;

  /* No valid offload device: run on the initial (host) device instead.  */
  if (d < 0 || d >= omp_get_num_devices ())
    d = id;
  p = omp_target_alloc (128 * sizeof (int), d);
  if (p == NULL)
    return 0;  /* device allocation unsupported here; trivially pass */

  /* Fill the device buffer; the if clause keeps execution on the host
     when d < 0, and device() must still receive a non-negative number.  */
  #pragma omp target is_device_ptr (p) if (d >= 0) device (d >= 0 ? d : 0)
  {
    int i, *q = (int *) p;
    for (i = 0; i < 128; i++)
      q[i] = i + 7;
  }

  /* Re-read the buffer on the same device and map the error flag back.  */
  #pragma omp target is_device_ptr (p) if (d >= 0) device (d >= 0 ? d : 0) map(from:err)
  {
    int i;
    err = 0;
    for (i = 0; i < 128; i++)
      if (((int *) p)[i] != i + 7)
	err = 1;
  }
  if (err)
    abort ();
  omp_target_free (p, d);
  return 0;
}
point_relax.c
/*BHEADER**********************************************************************
 * Copyright (c) 2008,  Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE.  See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision$
 ***********************************************************************EHEADER*/

#include "_hypre_struct_ls.h"

/* this currently cannot be greater than 7 */
#ifdef MAX_DEPTH
#undef MAX_DEPTH
#endif
#define MAX_DEPTH 7

/*--------------------------------------------------------------------------
 * Private data for the pointwise (weighted Jacobi / Gauss-Seidel style)
 * relaxation solver.  Pointsets partition the grid points (e.g. red/black);
 * one compute package is built per pointset.
 *--------------------------------------------------------------------------*/

typedef struct
{
   MPI_Comm                comm;

   HYPRE_Real              tol;       /* tolerance, set =0 for no convergence testing */
   HYPRE_Real              rresnorm;  /* relative residual norm, computed only if tol>0.0 */
   HYPRE_Int               max_iter;
   HYPRE_Int               rel_change; /* not yet used */
   HYPRE_Int               zero_guess;
   HYPRE_Real              weight;     /* relaxation weight (1.0 = unweighted) */

   HYPRE_Int               num_pointsets;
   HYPRE_Int              *pointset_sizes;
   HYPRE_Int              *pointset_ranks;
   hypre_Index            *pointset_strides;
   hypre_Index           **pointset_indices;

   hypre_StructMatrix     *A;   /* referenced (ref-counted), not owned */
   hypre_StructVector     *b;
   hypre_StructVector     *x;

   hypre_StructVector     *t;   /* temp vector, owned by this solver */

   HYPRE_Int               diag_rank;  /* stencil entry index of the diagonal */

   hypre_ComputePkg      **compute_pkgs;

   /* log info (always logged) */
   HYPRE_Int               num_iterations;
   HYPRE_Int               time_index;
   HYPRE_Int               flops;

} hypre_PointRelaxData;

/*--------------------------------------------------------------------------
 * Create a point-relaxation solver.  Defaults to a single pointset
 * covering every grid point (stride 1, index (0,0,0)), i.e. plain
 * weighted Jacobi.  Returns an opaque handle; destroy with
 * hypre_PointRelaxDestroy.
 *--------------------------------------------------------------------------*/

void *
hypre_PointRelaxCreate( MPI_Comm  comm )
{
   hypre_PointRelaxData *relax_data;

   hypre_Index           stride;
   hypre_Index           indices[1];

   relax_data = hypre_CTAlloc(hypre_PointRelaxData, 1);

   (relax_data -> comm)       = comm;
   (relax_data -> time_index) = hypre_InitializeTiming("PointRelax");

   /* set defaults */
   (relax_data -> tol)              = 0.0;  /* tol=0 means no convergence testing */
   (relax_data -> rresnorm)         = 0.0;
   (relax_data -> max_iter)         = 1000;
   (relax_data -> rel_change)       = 0;
   (relax_data -> zero_guess)       = 0;
   (relax_data -> weight)           = 1.0;
   (relax_data -> num_pointsets)    = 0;
   (relax_data -> pointset_sizes)   = NULL;
   (relax_data -> pointset_ranks)   = NULL;
   (relax_data -> pointset_strides) = NULL;
   (relax_data -> pointset_indices) = NULL;
   (relax_data -> A)                = NULL;
   (relax_data -> b)                = NULL;
   (relax_data -> x)                = NULL;
   (relax_data -> t)                = NULL;
   (relax_data -> compute_pkgs)     = NULL;

   hypre_SetIndex3(stride, 1, 1, 1);
   hypre_SetIndex3(indices[0], 0, 0, 0);
   hypre_PointRelaxSetNumPointsets((void *) relax_data, 1);
   hypre_PointRelaxSetPointset((void *) relax_data, 0, 1, stride, indices);

   return (void *) relax_data;
}

/*--------------------------------------------------------------------------
 * Destroy a point-relaxation solver: release per-pointset arrays and
 * compute packages, drop the matrix/vector references (ref-counted
 * destroys), and free the data structure itself.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_PointRelaxDestroy( void *relax_vdata )
{
   hypre_PointRelaxData *relax_data = (hypre_PointRelaxData *)relax_vdata;
   HYPRE_Int             i;

   if (relax_data)
   {
      for (i = 0; i < (relax_data -> num_pointsets); i++)
      {
         hypre_TFree(relax_data -> pointset_indices[i]);
      }
      if (relax_data -> compute_pkgs)
      {
         for (i = 0; i < (relax_data -> num_pointsets); i++)
         {
            hypre_ComputePkgDestroy(relax_data -> compute_pkgs[i]);
         }
      }
      hypre_TFree(relax_data -> pointset_sizes);
      hypre_TFree(relax_data -> pointset_ranks);
      hypre_TFree(relax_data -> pointset_strides);
      hypre_TFree(relax_data -> pointset_indices);
      hypre_StructMatrixDestroy(relax_data -> A);
      hypre_StructVectorDestroy(relax_data -> b);
      hypre_StructVectorDestroy(relax_data -> x);
      hypre_StructVectorDestroy(relax_data -> t);
      hypre_TFree(relax_data -> compute_pkgs);
      hypre_FinalizeTiming(relax_data -> time_index);
      hypre_TFree(relax_data);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_PointRelaxSetup( void *relax_vdata, hypre_StructMatrix *A, hypre_StructVector *b, hypre_StructVector *x ) { hypre_PointRelaxData *relax_data = (hypre_PointRelaxData *)relax_vdata; HYPRE_Int num_pointsets = (relax_data -> num_pointsets); HYPRE_Int *pointset_sizes = (relax_data -> pointset_sizes); hypre_Index *pointset_strides = (relax_data -> pointset_strides); hypre_Index **pointset_indices = (relax_data -> pointset_indices); HYPRE_Int ndim = hypre_StructMatrixNDim(A); hypre_StructVector *t; HYPRE_Int diag_rank; hypre_ComputeInfo *compute_info; hypre_ComputePkg **compute_pkgs; hypre_Index diag_index; hypre_IndexRef stride; hypre_IndexRef index; hypre_StructGrid *grid; hypre_StructStencil *stencil; hypre_BoxArrayArray *orig_indt_boxes; hypre_BoxArrayArray *orig_dept_boxes; hypre_BoxArrayArray *box_aa; hypre_BoxArray *box_a; hypre_Box *box; HYPRE_Int box_aa_size; HYPRE_Int box_a_size; hypre_BoxArrayArray *new_box_aa; hypre_BoxArray *new_box_a; hypre_Box *new_box; HYPRE_Real scale; HYPRE_Int frac; HYPRE_Int i, j, k, p, m, compute_i; /*---------------------------------------------------------- * Set up the temp vector *----------------------------------------------------------*/ if ((relax_data -> t) == NULL) { t = hypre_StructVectorCreate(hypre_StructVectorComm(b), hypre_StructVectorGrid(b)); hypre_StructVectorSetNumGhost(t, hypre_StructVectorNumGhost(b)); hypre_StructVectorInitialize(t); hypre_StructVectorAssemble(t); (relax_data -> t) = t; } /*---------------------------------------------------------- * Find the matrix diagonal *----------------------------------------------------------*/ grid = hypre_StructMatrixGrid(A); stencil = hypre_StructMatrixStencil(A); hypre_SetIndex3(diag_index, 0, 0, 0); diag_rank = hypre_StructStencilElementRank(stencil, diag_index); /*---------------------------------------------------------- * Set up the compute packages 
*----------------------------------------------------------*/ compute_pkgs = hypre_CTAlloc(hypre_ComputePkg *, num_pointsets); for (p = 0; p < num_pointsets; p++) { hypre_CreateComputeInfo(grid, stencil, &compute_info); orig_indt_boxes = hypre_ComputeInfoIndtBoxes(compute_info); orig_dept_boxes = hypre_ComputeInfoDeptBoxes(compute_info); stride = pointset_strides[p]; for (compute_i = 0; compute_i < 2; compute_i++) { switch(compute_i) { case 0: box_aa = orig_indt_boxes; break; case 1: box_aa = orig_dept_boxes; break; } box_aa_size = hypre_BoxArrayArraySize(box_aa); new_box_aa = hypre_BoxArrayArrayCreate(box_aa_size, ndim); for (i = 0; i < box_aa_size; i++) { box_a = hypre_BoxArrayArrayBoxArray(box_aa, i); box_a_size = hypre_BoxArraySize(box_a); new_box_a = hypre_BoxArrayArrayBoxArray(new_box_aa, i); hypre_BoxArraySetSize(new_box_a, box_a_size * pointset_sizes[p]); k = 0; for (m = 0; m < pointset_sizes[p]; m++) { index = pointset_indices[p][m]; for (j = 0; j < box_a_size; j++) { box = hypre_BoxArrayBox(box_a, j); new_box = hypre_BoxArrayBox(new_box_a, k); hypre_CopyBox(box, new_box); hypre_ProjectBox(new_box, index, stride); k++; } } } switch(compute_i) { case 0: hypre_ComputeInfoIndtBoxes(compute_info) = new_box_aa; break; case 1: hypre_ComputeInfoDeptBoxes(compute_info) = new_box_aa; break; } } hypre_CopyIndex(stride, hypre_ComputeInfoStride(compute_info)); hypre_ComputePkgCreate(compute_info, hypre_StructVectorDataSpace(x), 1, grid, &compute_pkgs[p]); hypre_BoxArrayArrayDestroy(orig_indt_boxes); hypre_BoxArrayArrayDestroy(orig_dept_boxes); } /*---------------------------------------------------------- * Set up the relax data structure *----------------------------------------------------------*/ (relax_data -> A) = hypre_StructMatrixRef(A); (relax_data -> x) = hypre_StructVectorRef(x); (relax_data -> b) = hypre_StructVectorRef(b); (relax_data -> diag_rank) = diag_rank; (relax_data -> compute_pkgs) = compute_pkgs; 
/*----------------------------------------------------- * Compute flops *-----------------------------------------------------*/ scale = 0.0; for (p = 0; p < num_pointsets; p++) { stride = pointset_strides[p]; frac = hypre_IndexX(stride); frac *= hypre_IndexY(stride); frac *= hypre_IndexZ(stride); scale += (pointset_sizes[p] / frac); } (relax_data -> flops) = scale * (hypre_StructMatrixGlobalSize(A) + hypre_StructVectorGlobalSize(x)); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_PointRelax( void *relax_vdata, hypre_StructMatrix *A, hypre_StructVector *b, hypre_StructVector *x ) { hypre_PointRelaxData *relax_data = (hypre_PointRelaxData *)relax_vdata; HYPRE_Int max_iter = (relax_data -> max_iter); HYPRE_Int zero_guess = (relax_data -> zero_guess); HYPRE_Real weight = (relax_data -> weight); HYPRE_Int num_pointsets = (relax_data -> num_pointsets); HYPRE_Int *pointset_ranks = (relax_data -> pointset_ranks); hypre_Index *pointset_strides = (relax_data -> pointset_strides); hypre_StructVector *t = (relax_data -> t); HYPRE_Int diag_rank = (relax_data -> diag_rank); hypre_ComputePkg **compute_pkgs = (relax_data -> compute_pkgs); HYPRE_Real tol = (relax_data -> tol); HYPRE_Real tol2 = tol*tol; hypre_ComputePkg *compute_pkg; hypre_CommHandle *comm_handle; hypre_BoxArrayArray *compute_box_aa; hypre_BoxArray *compute_box_a; hypre_Box *compute_box; hypre_Box *A_data_box; hypre_Box *b_data_box; hypre_Box *x_data_box; hypre_Box *t_data_box; HYPRE_Real *Ap; HYPRE_Real AAp0; HYPRE_Real *bp; HYPRE_Real *xp; HYPRE_Real *tp; void *matvec_data = NULL; HYPRE_Int Ai; HYPRE_Int bi; HYPRE_Int xi; HYPRE_Int ti; hypre_IndexRef stride; hypre_IndexRef start; hypre_Index loop_size; HYPRE_Int constant_coefficient; HYPRE_Int iter, p, compute_i, i, j; HYPRE_Int pointset; HYPRE_Real bsumsq, rsumsq; 
/*---------------------------------------------------------- * Initialize some things and deal with special cases *----------------------------------------------------------*/ hypre_BeginTiming(relax_data -> time_index); hypre_StructMatrixDestroy(relax_data -> A); hypre_StructVectorDestroy(relax_data -> b); hypre_StructVectorDestroy(relax_data -> x); (relax_data -> A) = hypre_StructMatrixRef(A); (relax_data -> x) = hypre_StructVectorRef(x); (relax_data -> b) = hypre_StructVectorRef(b); (relax_data -> num_iterations) = 0; /* if max_iter is zero, return */ if (max_iter == 0) { /* if using a zero initial guess, return zero */ if (zero_guess) { hypre_StructVectorSetConstantValues(x, 0.0); } hypre_EndTiming(relax_data -> time_index); return hypre_error_flag; } constant_coefficient = hypre_StructMatrixConstantCoefficient(A); if (constant_coefficient) hypre_StructVectorClearBoundGhostValues(x, 0); rsumsq = 0.0; if ( tol>0.0 ) bsumsq = hypre_StructInnerProd( b, b ); /*---------------------------------------------------------- * Do zero_guess iteration *----------------------------------------------------------*/ p = 0; iter = 0; if ( tol>0.0) { matvec_data = hypre_StructMatvecCreate(); hypre_StructMatvecSetup( matvec_data, A, x ); } if (zero_guess) { if ( p==0 ) rsumsq = 0.0; if (num_pointsets > 1) { hypre_StructVectorSetConstantValues(x, 0.0); } pointset = pointset_ranks[p]; compute_pkg = compute_pkgs[pointset]; stride = pointset_strides[pointset]; for (compute_i = 0; compute_i < 2; compute_i++) { switch(compute_i) { case 0: { compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg); } break; case 1: { compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg); } break; } hypre_ForBoxArrayI(i, compute_box_aa) { compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, i); A_data_box = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), i); b_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(b), i); x_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i); Ap = 
hypre_StructMatrixBoxData(A, i, diag_rank); bp = hypre_StructVectorBoxData(b, i); xp = hypre_StructVectorBoxData(x, i); hypre_ForBoxI(j, compute_box_a) { compute_box = hypre_BoxArrayBox(compute_box_a, j); start = hypre_BoxIMin(compute_box); hypre_BoxGetStrideSize(compute_box, stride, loop_size); /* all matrix coefficients are constant */ if ( constant_coefficient==1 ) { Ai = hypre_CCBoxIndexRank( A_data_box, start ); AAp0 = 1/Ap[Ai]; hypre_BoxLoop2Begin(hypre_StructVectorNDim(x), loop_size, b_data_box, start, stride, bi, x_data_box, start, stride, xi); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,bi,xi) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop2For(bi, xi) { xp[xi] = bp[bi] * AAp0; } hypre_BoxLoop2End(bi, xi); } /* constant_coefficent 0 (variable) or 2 (variable diagonal only) are the same for the diagonal */ else { hypre_BoxLoop3Begin(hypre_StructVectorNDim(x), loop_size, A_data_box, start, stride, Ai, b_data_box, start, stride, bi, x_data_box, start, stride, xi); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,bi,xi) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop3For(Ai, bi, xi) { xp[xi] = bp[bi] / Ap[Ai]; } hypre_BoxLoop3End(Ai, bi, xi); } } } } if (weight != 1.0) { hypre_StructScale(weight, x); } p = (p + 1) % num_pointsets; iter = iter + (p == 0); if ( tol>0.0 && p==0 ) /* ... p==0 here means we've finished going through all the pointsets, i.e. this iteration is complete. tol>0.0 means to do a convergence test, using tol. 
The test is simply ||r||/||b||<tol, where r=residual, b=r.h.s., unweighted L2 norm */ { hypre_StructCopy( b, t ); /* t = b */ hypre_StructMatvecCompute( matvec_data, -1.0, A, x, 1.0, t ); /* t = - A x + t = - A x + b */ rsumsq = hypre_StructInnerProd( t, t ); /* <t,t> */ if ( rsumsq/bsumsq<tol2 ) max_iter = iter; /* converged; reset max_iter to prevent more iterations */ } } /*---------------------------------------------------------- * Do regular iterations *----------------------------------------------------------*/ while (iter < max_iter) { if ( p==0 ) rsumsq = 0.0; pointset = pointset_ranks[p]; compute_pkg = compute_pkgs[pointset]; stride = pointset_strides[pointset]; /*hypre_StructCopy(x, t); ... not needed as long as the copy at the end of the loop is restricted to the current pointset (hypre_relax_copy, hypre_relax_wtx */ for (compute_i = 0; compute_i < 2; compute_i++) { switch(compute_i) { case 0: { xp = hypre_StructVectorData(x); hypre_InitializeIndtComputations(compute_pkg, xp, &comm_handle); compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg); } break; case 1: { hypre_FinalizeIndtComputations(comm_handle); compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg); } break; } hypre_ForBoxArrayI(i, compute_box_aa) { compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, i); A_data_box = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), i); b_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(b), i); x_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i); t_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(t), i); bp = hypre_StructVectorBoxData(b, i); xp = hypre_StructVectorBoxData(x, i); tp = hypre_StructVectorBoxData(t, i); hypre_ForBoxI(j, compute_box_a) { compute_box = hypre_BoxArrayBox(compute_box_a, j); if ( constant_coefficient==1 || constant_coefficient==2 ) { hypre_PointRelax_core12( relax_vdata, A, constant_coefficient, compute_box, bp, xp, tp, i, A_data_box, b_data_box, x_data_box, t_data_box, stride ); } else { 
hypre_PointRelax_core0( relax_vdata, A, constant_coefficient, compute_box, bp, xp, tp, i, A_data_box, b_data_box, x_data_box, t_data_box, stride ); } Ap = hypre_StructMatrixBoxData(A, i, diag_rank); if ( constant_coefficient==0 || constant_coefficient==2 ) /* divide by the variable diagonal */ { start = hypre_BoxIMin(compute_box); hypre_BoxGetStrideSize(compute_box, stride, loop_size); hypre_BoxLoop2Begin(hypre_StructVectorNDim(x), loop_size, A_data_box, start, stride, Ai, t_data_box, start, stride, ti); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,ti) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop2For(Ai, ti) { tp[ti] /= Ap[Ai]; } hypre_BoxLoop2End(Ai, ti); } } } } if (weight != 1.0) { /* hypre_StructScale((1.0 - weight), x); hypre_StructAxpy(weight, t, x);*/ hypre_relax_wtx( relax_data, pointset, t, x ); /* x=w*t+(1-w)*x on pointset */ } else { hypre_relax_copy( relax_data, pointset, t, x ); /* x=t on pointset */ /* hypre_StructCopy(t, x);*/ } p = (p + 1) % num_pointsets; iter = iter + (p == 0); if ( tol>0.0 && p==0 ) /* ... p==0 here means we've finished going through all the pointsets, i.e. this iteration is complete. tol>0.0 means to do a convergence test, using tol. 
The test is simply ||r||/||b||<tol, where r=residual, b=r.h.s., unweighted L2 norm */ { hypre_StructCopy( b, t ); /* t = b */ hypre_StructMatvecCompute( matvec_data, -1.0, A, x, 1.0, t ); /* t = - A x + t = - A x + b */ rsumsq = hypre_StructInnerProd( t, t ); /* <t,t> */ if ( rsumsq/bsumsq<tol2 ) break; } } if ( tol>0.0 ) { hypre_StructMatvecDestroy( matvec_data ); } if ( tol>0.0 ) (relax_data -> rresnorm) = sqrt( rsumsq/bsumsq ); (relax_data -> num_iterations) = iter; /*----------------------------------------------------------------------- * Return *-----------------------------------------------------------------------*/ hypre_IncFLOPCount(relax_data -> flops); hypre_EndTiming(relax_data -> time_index); return hypre_error_flag; } /* for constant_coefficient==0, all coefficients may vary ...*/ HYPRE_Int hypre_PointRelax_core0( void *relax_vdata, hypre_StructMatrix *A, HYPRE_Int constant_coefficient, hypre_Box *compute_box, HYPRE_Real *bp, HYPRE_Real *xp, HYPRE_Real *tp, HYPRE_Int boxarray_id, hypre_Box *A_data_box, hypre_Box *b_data_box, hypre_Box *x_data_box, hypre_Box *t_data_box, hypre_IndexRef stride ) { hypre_PointRelaxData *relax_data = (hypre_PointRelaxData *)relax_vdata; HYPRE_Real *Ap0; HYPRE_Real *Ap1; HYPRE_Real *Ap2; HYPRE_Real *Ap3; HYPRE_Real *Ap4; HYPRE_Real *Ap5; HYPRE_Real *Ap6; HYPRE_Int xoff0; HYPRE_Int xoff1; HYPRE_Int xoff2; HYPRE_Int xoff3; HYPRE_Int xoff4; HYPRE_Int xoff5; HYPRE_Int xoff6; hypre_StructStencil *stencil; hypre_Index *stencil_shape; HYPRE_Int stencil_size; HYPRE_Int diag_rank = (relax_data -> diag_rank); hypre_IndexRef start; hypre_Index loop_size; HYPRE_Int si, sk, ssi[MAX_DEPTH], depth, k; HYPRE_Int Ai; HYPRE_Int bi; HYPRE_Int xi; HYPRE_Int ti; stencil = hypre_StructMatrixStencil(A); stencil_shape = hypre_StructStencilShape(stencil); stencil_size = hypre_StructStencilSize(stencil); start = hypre_BoxIMin(compute_box); hypre_BoxGetStrideSize(compute_box, stride, loop_size); hypre_BoxLoop2Begin(hypre_StructMatrixNDim(A), 
loop_size, b_data_box, start, stride, bi, t_data_box, start, stride, ti); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,bi,ti) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop2For(bi, ti) { tp[ti] = bp[bi]; } hypre_BoxLoop2End(bi, ti); /* unroll up to depth MAX_DEPTH */ for (si = 0; si < stencil_size; si += MAX_DEPTH) { depth = hypre_min(MAX_DEPTH, (stencil_size - si)); for (k = 0, sk = si; k < depth; sk++) { if (sk == diag_rank) { depth--; } else { ssi[k] = sk; k++; } } switch(depth) { case 7: Ap6 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[6]); xoff6 = hypre_BoxOffsetDistance( x_data_box, stencil_shape[ssi[6]]); case 6: Ap5 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[5]); xoff5 = hypre_BoxOffsetDistance( x_data_box, stencil_shape[ssi[5]]); case 5: Ap4 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[4]); xoff4 = hypre_BoxOffsetDistance( x_data_box, stencil_shape[ssi[4]]); case 4: Ap3 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[3]); xoff3 = hypre_BoxOffsetDistance( x_data_box, stencil_shape[ssi[3]]); case 3: Ap2 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[2]); xoff2 = hypre_BoxOffsetDistance( x_data_box, stencil_shape[ssi[2]]); case 2: Ap1 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[1]); xoff1 = hypre_BoxOffsetDistance( x_data_box, stencil_shape[ssi[1]]); case 1: Ap0 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[0]); xoff0 = hypre_BoxOffsetDistance( x_data_box, stencil_shape[ssi[0]]); case 0: break; } switch(depth) { case 7: hypre_BoxLoop3Begin(hypre_StructMatrixNDim(A), loop_size, A_data_box, start, stride, Ai, x_data_box, start, stride, xi, t_data_box, start, stride, ti); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ti) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop3For(Ai, xi, ti) { tp[ti] -= Ap0[Ai] * xp[xi + xoff0] + Ap1[Ai] * xp[xi + xoff1] + Ap2[Ai] * xp[xi + xoff2] + Ap3[Ai] * xp[xi + xoff3] + Ap4[Ai] * xp[xi + xoff4] + Ap5[Ai] * xp[xi + xoff5] + Ap6[Ai] * xp[xi + xoff6]; } 
hypre_BoxLoop3End(Ai, xi, ti); break; case 6: hypre_BoxLoop3Begin(hypre_StructMatrixNDim(A), loop_size, A_data_box, start, stride, Ai, x_data_box, start, stride, xi, t_data_box, start, stride, ti); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ti) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop3For(Ai, xi, ti) { tp[ti] -= Ap0[Ai] * xp[xi + xoff0] + Ap1[Ai] * xp[xi + xoff1] + Ap2[Ai] * xp[xi + xoff2] + Ap3[Ai] * xp[xi + xoff3] + Ap4[Ai] * xp[xi + xoff4] + Ap5[Ai] * xp[xi + xoff5]; } hypre_BoxLoop3End(Ai, xi, ti); break; case 5: hypre_BoxLoop3Begin(hypre_StructMatrixNDim(A), loop_size, A_data_box, start, stride, Ai, x_data_box, start, stride, xi, t_data_box, start, stride, ti); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ti) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop3For(Ai, xi, ti) { tp[ti] -= Ap0[Ai] * xp[xi + xoff0] + Ap1[Ai] * xp[xi + xoff1] + Ap2[Ai] * xp[xi + xoff2] + Ap3[Ai] * xp[xi + xoff3] + Ap4[Ai] * xp[xi + xoff4]; } hypre_BoxLoop3End(Ai, xi, ti); break; case 4: hypre_BoxLoop3Begin(hypre_StructMatrixNDim(A), loop_size, A_data_box, start, stride, Ai, x_data_box, start, stride, xi, t_data_box, start, stride, ti); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ti) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop3For(Ai, xi, ti) { tp[ti] -= Ap0[Ai] * xp[xi + xoff0] + Ap1[Ai] * xp[xi + xoff1] + Ap2[Ai] * xp[xi + xoff2] + Ap3[Ai] * xp[xi + xoff3]; } hypre_BoxLoop3End(Ai, xi, ti); break; case 3: hypre_BoxLoop3Begin(hypre_StructMatrixNDim(A), loop_size, A_data_box, start, stride, Ai, x_data_box, start, stride, xi, t_data_box, start, stride, ti); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ti) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop3For(Ai, xi, ti) { tp[ti] -= Ap0[Ai] * xp[xi + xoff0] + Ap1[Ai] * xp[xi + xoff1] + Ap2[Ai] * xp[xi + xoff2]; } hypre_BoxLoop3End(Ai, xi, ti); break; case 2: hypre_BoxLoop3Begin(hypre_StructMatrixNDim(A), 
loop_size, A_data_box, start, stride, Ai, x_data_box, start, stride, xi, t_data_box, start, stride, ti); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ti) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop3For(Ai, xi, ti) { tp[ti] -= Ap0[Ai] * xp[xi + xoff0] + Ap1[Ai] * xp[xi + xoff1]; } hypre_BoxLoop3End(Ai, xi, ti); break; case 1: hypre_BoxLoop3Begin(hypre_StructMatrixNDim(A), loop_size, A_data_box, start, stride, Ai, x_data_box, start, stride, xi, t_data_box, start, stride, ti); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ai,xi,ti) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop3For(Ai, xi, ti) { tp[ti] -= Ap0[Ai] * xp[xi + xoff0]; } hypre_BoxLoop3End(Ai, xi, ti); break; case 0: break; } } return hypre_error_flag; } /* for constant_coefficient==1 or 2, all offdiagonal coefficients constant over space ...*/ HYPRE_Int hypre_PointRelax_core12( void *relax_vdata, hypre_StructMatrix *A, HYPRE_Int constant_coefficient, hypre_Box *compute_box, HYPRE_Real *bp, HYPRE_Real *xp, HYPRE_Real *tp, HYPRE_Int boxarray_id, hypre_Box *A_data_box, hypre_Box *b_data_box, hypre_Box *x_data_box, hypre_Box *t_data_box, hypre_IndexRef stride ) { hypre_PointRelaxData *relax_data = (hypre_PointRelaxData *)relax_vdata; HYPRE_Real *Apd; HYPRE_Real *Ap0; HYPRE_Real *Ap1; HYPRE_Real *Ap2; HYPRE_Real *Ap3; HYPRE_Real *Ap4; HYPRE_Real *Ap5; HYPRE_Real *Ap6; HYPRE_Real AAp0; HYPRE_Real AAp1; HYPRE_Real AAp2; HYPRE_Real AAp3; HYPRE_Real AAp4; HYPRE_Real AAp5; HYPRE_Real AAp6; HYPRE_Real AApd; HYPRE_Int xoff0; HYPRE_Int xoff1; HYPRE_Int xoff2; HYPRE_Int xoff3; HYPRE_Int xoff4; HYPRE_Int xoff5; HYPRE_Int xoff6; hypre_StructStencil *stencil; hypre_Index *stencil_shape; HYPRE_Int stencil_size; HYPRE_Int diag_rank = (relax_data -> diag_rank); hypre_IndexRef start; hypre_Index loop_size; HYPRE_Int si, sk, ssi[MAX_DEPTH], depth, k; HYPRE_Int Ai; HYPRE_Int bi; HYPRE_Int xi; HYPRE_Int ti; stencil = hypre_StructMatrixStencil(A); stencil_shape = 
hypre_StructStencilShape(stencil); stencil_size = hypre_StructStencilSize(stencil); start = hypre_BoxIMin(compute_box); hypre_BoxGetStrideSize(compute_box, stride, loop_size); /* The standard (variable coefficient) algorithm initializes tp=bp. Do it here, but for constant diagonal, also divide by the diagonal (and set up AApd for other division-equivalents. For a variable diagonal, this diagonal division is done at the end of the computation. */ Ai = hypre_CCBoxIndexRank( A_data_box, start ); if ( constant_coefficient==1 ) /* constant diagonal */ { Apd = hypre_StructMatrixBoxData(A, boxarray_id, diag_rank); AApd = 1/Apd[Ai]; hypre_BoxLoop2Begin(hypre_StructMatrixNDim(A), loop_size, b_data_box, start, stride, bi, t_data_box, start, stride, ti); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,bi,ti) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop2For(bi, ti) { tp[ti] = AApd * bp[bi]; } hypre_BoxLoop2End(bi, ti); } else /* constant_coefficient==2, variable diagonal */ { AApd = 1; hypre_BoxLoop2Begin(hypre_StructMatrixNDim(A), loop_size, b_data_box, start, stride, bi, t_data_box, start, stride, ti); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,bi,ti) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop2For(bi, ti) { tp[ti] = bp[bi]; } hypre_BoxLoop2End(bi, ti); } /* unroll up to depth MAX_DEPTH */ for (si = 0; si < stencil_size; si += MAX_DEPTH) { depth = hypre_min(MAX_DEPTH, (stencil_size - si)); for (k = 0, sk = si; k < depth; sk++) { if (sk == diag_rank) { depth--; } else { ssi[k] = sk; k++; } } switch(depth) { case 7: Ap6 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[6]); xoff6 = hypre_BoxOffsetDistance( x_data_box, stencil_shape[ssi[6]]); case 6: Ap5 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[5]); xoff5 = hypre_BoxOffsetDistance( x_data_box, stencil_shape[ssi[5]]); case 5: Ap4 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[4]); xoff4 = hypre_BoxOffsetDistance( x_data_box, stencil_shape[ssi[4]]); case 4: Ap3 = 
hypre_StructMatrixBoxData(A, boxarray_id, ssi[3]); xoff3 = hypre_BoxOffsetDistance( x_data_box, stencil_shape[ssi[3]]); case 3: Ap2 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[2]); xoff2 = hypre_BoxOffsetDistance( x_data_box, stencil_shape[ssi[2]]); case 2: Ap1 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[1]); xoff1 = hypre_BoxOffsetDistance( x_data_box, stencil_shape[ssi[1]]); case 1: Ap0 = hypre_StructMatrixBoxData(A, boxarray_id, ssi[0]); xoff0 = hypre_BoxOffsetDistance( x_data_box, stencil_shape[ssi[0]]); case 0: break; } switch(depth) { case 7: AAp0 = Ap0[Ai]*AApd; AAp1 = Ap1[Ai]*AApd; AAp2 = Ap2[Ai]*AApd; AAp3 = Ap3[Ai]*AApd; AAp4 = Ap4[Ai]*AApd; AAp5 = Ap5[Ai]*AApd; AAp6 = Ap6[Ai]*AApd; hypre_BoxLoop2Begin(hypre_StructMatrixNDim(A), loop_size, x_data_box, start, stride, xi, t_data_box, start, stride, ti); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,ti) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop2For(xi, ti) { tp[ti] -= AAp0 * xp[xi + xoff0] + AAp1 * xp[xi + xoff1] + AAp2 * xp[xi + xoff2] + AAp3 * xp[xi + xoff3] + AAp4 * xp[xi + xoff4] + AAp5 * xp[xi + xoff5] + AAp6 * xp[xi + xoff6]; } hypre_BoxLoop2End(xi, ti); break; case 6: AAp0 = Ap0[Ai]*AApd; AAp1 = Ap1[Ai]*AApd; AAp2 = Ap2[Ai]*AApd; AAp3 = Ap3[Ai]*AApd; AAp4 = Ap4[Ai]*AApd; AAp5 = Ap5[Ai]*AApd; hypre_BoxLoop2Begin(hypre_StructMatrixNDim(A), loop_size, x_data_box, start, stride, xi, t_data_box, start, stride, ti); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,ti) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop2For(xi, ti) { tp[ti] -= AAp0 * xp[xi + xoff0] + AAp1 * xp[xi + xoff1] + AAp2 * xp[xi + xoff2] + AAp3 * xp[xi + xoff3] + AAp4 * xp[xi + xoff4] + AAp5 * xp[xi + xoff5]; } hypre_BoxLoop2End(xi, ti); break; case 5: AAp0 = Ap0[Ai]*AApd; AAp1 = Ap1[Ai]*AApd; AAp2 = Ap2[Ai]*AApd; AAp3 = Ap3[Ai]*AApd; AAp4 = Ap4[Ai]*AApd; hypre_BoxLoop2Begin(hypre_StructMatrixNDim(A), loop_size, x_data_box, start, stride, xi, t_data_box, start, stride, ti); 
#ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,ti) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop2For(xi, ti) { tp[ti] -= AAp0 * xp[xi + xoff0] + AAp1 * xp[xi + xoff1] + AAp2 * xp[xi + xoff2] + AAp3 * xp[xi + xoff3] + AAp4 * xp[xi + xoff4]; } hypre_BoxLoop2End(xi, ti); break; case 4: AAp0 = Ap0[Ai]*AApd; AAp1 = Ap1[Ai]*AApd; AAp2 = Ap2[Ai]*AApd; AAp3 = Ap3[Ai]*AApd; hypre_BoxLoop2Begin(hypre_StructMatrixNDim(A), loop_size, x_data_box, start, stride, xi, t_data_box, start, stride, ti); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,ti) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop2For(xi, ti) { tp[ti] -= AAp0 * xp[xi + xoff0] + AAp1 * xp[xi + xoff1] + AAp2 * xp[xi + xoff2] + AAp3 * xp[xi + xoff3]; } hypre_BoxLoop2End(xi, ti); break; case 3: AAp0 = Ap0[Ai]*AApd; AAp1 = Ap1[Ai]*AApd; AAp2 = Ap2[Ai]*AApd; hypre_BoxLoop2Begin(hypre_StructMatrixNDim(A), loop_size, x_data_box, start, stride, xi, t_data_box, start, stride, ti); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,ti) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop2For(xi, ti) { tp[ti] -= AAp0 * xp[xi + xoff0] + AAp1 * xp[xi + xoff1] + AAp2 * xp[xi + xoff2]; } hypre_BoxLoop2End(xi, ti); break; case 2: AAp0 = Ap0[Ai]*AApd; AAp1 = Ap1[Ai]*AApd; hypre_BoxLoop2Begin(hypre_StructMatrixNDim(A), loop_size, x_data_box, start, stride, xi, t_data_box, start, stride, ti); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,ti) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop2For(xi, ti) { tp[ti] -= AAp0 * xp[xi + xoff0] + AAp1 * xp[xi + xoff1]; } hypre_BoxLoop2End(xi, ti); break; case 1: AAp0 = Ap0[Ai]*AApd; hypre_BoxLoop2Begin(hypre_StructMatrixNDim(A), loop_size, x_data_box, start, stride, xi, t_data_box, start, stride, ti); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,ti) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop2For(xi, ti) { tp[ti] -= AAp0 * xp[xi + xoff0]; } hypre_BoxLoop2End(xi, ti); 
break;
         case 0:
            break;
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_PointRelaxSetTol
 *
 * Set the relative-residual convergence tolerance checked by the relax
 * driver.  relax_vdata is an opaque handle created elsewhere; it is cast
 * back to hypre_PointRelaxData here (same pattern for every accessor
 * below).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_PointRelaxSetTol( void       *relax_vdata,
                        HYPRE_Real  tol )
{
   hypre_PointRelaxData *relax_data = (hypre_PointRelaxData *)relax_vdata;

   (relax_data -> tol) = tol;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_PointRelaxGetTol - retrieve the convergence tolerance.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_PointRelaxGetTol( void       *relax_vdata,
                        HYPRE_Real *tol )
{
   hypre_PointRelaxData *relax_data = (hypre_PointRelaxData *)relax_vdata;

   *tol = (relax_data -> tol);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_PointRelaxSetMaxIter - set the maximum number of sweeps.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_PointRelaxSetMaxIter( void      *relax_vdata,
                            HYPRE_Int  max_iter )
{
   hypre_PointRelaxData *relax_data = (hypre_PointRelaxData *)relax_vdata;

   (relax_data -> max_iter) = max_iter;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_PointRelaxGetMaxIter - retrieve the maximum number of sweeps.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_PointRelaxGetMaxIter( void      *relax_vdata,
                            HYPRE_Int *max_iter )
{
   hypre_PointRelaxData *relax_data = (hypre_PointRelaxData *)relax_vdata;

   *max_iter = (relax_data -> max_iter);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_PointRelaxSetZeroGuess
 *
 * When nonzero, the solver may treat the initial guess as zero and skip
 * the first residual computation (exact use is in the relax driver, not
 * visible here).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_PointRelaxSetZeroGuess( void      *relax_vdata,
                              HYPRE_Int  zero_guess )
{
   hypre_PointRelaxData *relax_data = (hypre_PointRelaxData *)relax_vdata;

   (relax_data -> zero_guess) = zero_guess;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 *--------------------------------------------------------------------------*/

/*--------------------------------------------------------------------------
 * hypre_PointRelaxGetZeroGuess - retrieve the zero-guess flag.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_PointRelaxGetZeroGuess( void      *relax_vdata,
                              HYPRE_Int *zero_guess )
{
   hypre_PointRelaxData *relax_data = (hypre_PointRelaxData *)relax_vdata;

   *zero_guess = (relax_data -> zero_guess);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_PointRelaxGetNumIterations - number of sweeps actually performed.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_PointRelaxGetNumIterations( void      *relax_vdata,
                                  HYPRE_Int *num_iterations )
{
   hypre_PointRelaxData *relax_data = (hypre_PointRelaxData *)relax_vdata;

   *num_iterations = (relax_data -> num_iterations);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_PointRelaxSetWeight - set the relaxation (damping) weight used by
 * hypre_relax_wtx below.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_PointRelaxSetWeight( void       *relax_vdata,
                           HYPRE_Real  weight )
{
   hypre_PointRelaxData *relax_data = (hypre_PointRelaxData *)relax_vdata;

   (relax_data -> weight) = weight;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_PointRelaxSetNumPointsets
 *
 * Resize the pointset bookkeeping arrays.  All previously registered
 * pointsets are freed; the new arrays start out empty (size 0, identity
 * rank ordering, NULL index lists) and are filled in later via
 * hypre_PointRelaxSetPointset / SetPointsetRank.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_PointRelaxSetNumPointsets( void      *relax_vdata,
                                 HYPRE_Int  num_pointsets )
{
   hypre_PointRelaxData *relax_data = (hypre_PointRelaxData *)relax_vdata;
   HYPRE_Int             i;

   /* free up old pointset memory */
   for (i = 0; i < (relax_data -> num_pointsets); i++)
   {
      hypre_TFree(relax_data -> pointset_indices[i]);
   }
   hypre_TFree(relax_data -> pointset_sizes);
   hypre_TFree(relax_data -> pointset_ranks);
   hypre_TFree(relax_data -> pointset_strides);
   hypre_TFree(relax_data -> pointset_indices);

   /* alloc new pointset memory */
   (relax_data -> num_pointsets)    = num_pointsets;
   (relax_data -> pointset_sizes)   = hypre_TAlloc(HYPRE_Int, num_pointsets);
   (relax_data -> pointset_ranks)   = hypre_TAlloc(HYPRE_Int, num_pointsets);
   (relax_data -> pointset_strides) = hypre_TAlloc(hypre_Index, num_pointsets);
   (relax_data -> pointset_indices) = hypre_TAlloc(hypre_Index *, num_pointsets);
   for (i = 0; i < num_pointsets; i++)
   {
      (relax_data -> pointset_sizes[i])   = 0;
      (relax_data -> pointset_ranks[i])   = i;   /* default: process in index order */
      (relax_data -> pointset_indices[i]) = NULL;
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_PointRelaxSetPointset
 *
 * Define one pointset: its stride and the list of offsets (indices) it
 * visits.  The index list is deep-copied, so the caller keeps ownership
 * of pointset_indices.  Assumes SetNumPointsets was called first so that
 * `pointset` is a valid slot (not checked here).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_PointRelaxSetPointset( void        *relax_vdata,
                             HYPRE_Int    pointset,
                             HYPRE_Int    pointset_size,
                             hypre_Index  pointset_stride,
                             hypre_Index *pointset_indices )
{
   hypre_PointRelaxData *relax_data = (hypre_PointRelaxData *)relax_vdata;
   HYPRE_Int             i;

   /* free up old pointset memory */
   hypre_TFree(relax_data -> pointset_indices[pointset]);

   /* alloc new pointset memory */
   (relax_data -> pointset_indices[pointset]) =
      hypre_TAlloc(hypre_Index, pointset_size);

   (relax_data -> pointset_sizes[pointset]) = pointset_size;
   hypre_CopyIndex(pointset_stride, (relax_data -> pointset_strides[pointset]));
   for (i = 0; i < pointset_size; i++)
   {
      hypre_CopyIndex(pointset_indices[i],
                      (relax_data -> pointset_indices[pointset][i]));
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_PointRelaxSetPointsetRank - set the order in which this pointset
 * is visited relative to the others.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_PointRelaxSetPointsetRank( void      *relax_vdata,
                                 HYPRE_Int  pointset,
                                 HYPRE_Int  pointset_rank )
{
   hypre_PointRelaxData *relax_data = (hypre_PointRelaxData *)relax_vdata;

   (relax_data -> pointset_ranks[pointset]) = pointset_rank;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_PointRelaxSetTempVec
 *
 * Replace the temporary work vector.  The old vector's reference is
 * released and a new reference to t is taken (reference-counted, so the
 * caller may destroy its own handle afterwards).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_PointRelaxSetTempVec( void               *relax_vdata,
                            hypre_StructVector *t )
{
   hypre_PointRelaxData *relax_data = (hypre_PointRelaxData *)relax_vdata;

   hypre_StructVectorDestroy(relax_data -> t);
   (relax_data -> t) = hypre_StructVectorRef(t);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_PointRelaxGetFinalRelativeResidualNorm
 *
 * NOTE(review): returns the literal 0 rather than hypre_error_flag like
 * every other accessor in this file — presumably intentional, but worth
 * confirming for consistency.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_PointRelaxGetFinalRelativeResidualNorm( void       *relax_vdata,
                                              HYPRE_Real *norm )
{
   hypre_PointRelaxData *relax_data = (hypre_PointRelaxData *)relax_vdata;

   *norm = relax_data -> rresnorm;

   return 0;
}

/*--------------------------------------------------------------------------
 * Special vector operation for use in hypre_PointRelax -
 * convex combination of vectors on specified pointsets.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_relax_wtx( void               *relax_vdata,
                 HYPRE_Int           pointset,
                 hypre_StructVector *t,
                 hypre_StructVector *x )
/* Sets x to a convex combination of x and t,
   x = weight * t + (1-weight) * x, but only in the specified pointset */
{
   hypre_PointRelaxData  *relax_data       = (hypre_PointRelaxData *)relax_vdata;
   HYPRE_Real             weight           = (relax_data -> weight);
   hypre_Index           *pointset_strides = (relax_data -> pointset_strides);
   hypre_ComputePkg     **compute_pkgs     = (relax_data -> compute_pkgs);

   hypre_ComputePkg      *compute_pkg;
   hypre_IndexRef         stride;
   hypre_IndexRef         start;
   hypre_Index            loop_size;
   HYPRE_Real             weightc = 1 - weight;   /* complement of the weight */
   HYPRE_Real            *xp, *tp;
   HYPRE_Int              compute_i, i, j, xi, ti;
   hypre_BoxArrayArray   *compute_box_aa;
   hypre_BoxArray        *compute_box_a;
   hypre_Box             *compute_box;
   hypre_Box             *x_data_box;
   hypre_Box             *t_data_box;

   compute_pkg = compute_pkgs[pointset];
   stride      = pointset_strides[pointset];

   /* Pass 0 walks the independent boxes, pass 1 the dependent ones
      (same box-traversal pattern as hypre_relax_copy below). */
   for (compute_i = 0; compute_i < 2; compute_i++)
   {
      switch(compute_i)
      {
         case 0:
         {
            compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg);
         }
         break;

         case 1:
         {
            compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg);
         }
         break;
      }

      hypre_ForBoxArrayI(i, compute_box_aa)
      {
         compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, i);

         x_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);
         t_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(t), i);

         xp = hypre_StructVectorBoxData(x, i);
         tp = hypre_StructVectorBoxData(t, i);

         hypre_ForBoxI(j, compute_box_a)
         {
            compute_box = hypre_BoxArrayBox(compute_box_a, j);

            start = hypre_BoxIMin(compute_box);
            hypre_BoxGetStrideSize(compute_box, stride, loop_size);

            hypre_BoxLoop2Begin(hypre_StructVectorNDim(x), loop_size,
                                x_data_box, start, stride, xi,
                                t_data_box, start, stride, ti);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,ti) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop2For(xi, ti)
            {
               xp[xi] = weight*tp[ti] + weightc*xp[xi];
            }
            hypre_BoxLoop2End(xi, ti);
         }
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Special vector operation for use in hypre_PointRelax -
 * vector copy on specified pointsets.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_relax_copy( void               *relax_vdata,
                  HYPRE_Int           pointset,
                  hypre_StructVector *t,
                  hypre_StructVector *x )
/* Sets x to t, x=t, but only in the specified pointset. */
{
   hypre_PointRelaxData  *relax_data       = (hypre_PointRelaxData *)relax_vdata;
   hypre_Index           *pointset_strides = (relax_data -> pointset_strides);
   hypre_ComputePkg     **compute_pkgs     = (relax_data -> compute_pkgs);

   hypre_ComputePkg      *compute_pkg;
   hypre_IndexRef         stride;
   hypre_IndexRef         start;
   hypre_Index            loop_size;
   HYPRE_Real            *xp, *tp;
   HYPRE_Int              compute_i, i, j, xi, ti;
   hypre_BoxArrayArray   *compute_box_aa;
   hypre_BoxArray        *compute_box_a;
   hypre_Box             *compute_box;
   hypre_Box             *x_data_box;
   hypre_Box             *t_data_box;

   compute_pkg = compute_pkgs[pointset];
   stride      = pointset_strides[pointset];

   /* Same independent/dependent box traversal as hypre_relax_wtx. */
   for (compute_i = 0; compute_i < 2; compute_i++)
   {
      switch(compute_i)
      {
         case 0:
         {
            compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg);
         }
         break;

         case 1:
         {
            compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg);
         }
         break;
      }

      hypre_ForBoxArrayI(i, compute_box_aa)
      {
         compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, i);

         x_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);
         t_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(t), i);

         xp = hypre_StructVectorBoxData(x, i);
         tp = hypre_StructVectorBoxData(t, i);

         hypre_ForBoxI(j, compute_box_a)
         {
            compute_box = hypre_BoxArrayBox(compute_box_a, j);

            start = hypre_BoxIMin(compute_box);
            hypre_BoxGetStrideSize(compute_box, stride, loop_size);

            hypre_BoxLoop2Begin(hypre_StructVectorNDim(x), loop_size,
                                x_data_box, start, stride, xi,
                                t_data_box, start, stride, ti);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,ti) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop2For(xi, ti)
            {
               xp[xi] = tp[ti];
            }
            hypre_BoxLoop2End(xi, ti);
         }
      }
   }

   return hypre_error_flag;
}
GB_unop__lnot_int64_int64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__lnot_int64_int64) // op(A') function: GB (_unop_tran__lnot_int64_int64) // C type: int64_t // A type: int64_t // cast: int64_t cij = aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int64_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CAST(z, aij) \ int64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int64_t z = aij ; \ Cx [pC] = !(z != 0) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__lnot_int64_int64) ( int64_t *Cx, // Cx and Ax may be aliased const int64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p 
; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t aij = Ax [p] ; int64_t z = aij ; Cx [p] = !(z != 0) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int64_t aij = Ax [p] ; int64_t z = aij ; Cx [p] = !(z != 0) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__lnot_int64_int64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
papiw_util.h
#ifndef PAPIWUTIL
#define PAPIWUTIL
#ifndef NOPAPIW
#include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <iostream>
#include <algorithm>
#include <papi.h>
#include <omp.h>
#include <pthread.h>

/**
 * PapiWrapper abstract class
 *
 * This interface defines the public interface and provides default functionality
 * and further utility functions.  Concrete subclasses implement AddEvent /
 * Start / Stop / GetResult / Print / Reset against a PAPI event set.
 */
class PapiWrapper
{
public:
    virtual ~PapiWrapper() {}
    virtual void AddEvent(const int eventCode) = 0;
    virtual void Start() = 0;
    virtual void Stop() = 0;
    virtual long long GetResult(const int eventCode) = 0;
    virtual void Print() = 0;
    virtual void Reset() = 0;

    /**
     * Initialize the PAPI library, then register each given event code.
     *
     * @tparam PapiCodes a variadic list of PAPI eventcodes
     * @warning Exits with an error if called in a parallel region
     *
     * NOTE(review): std::conjunction / std::is_integral need <type_traits>,
     * which is not included directly here — presumably pulled in
     * transitively; confirm.
     */
    template <typename... PapiCodes>
    void Init(PapiCodes const... eventcodes)
    {
        /* Initialize the PAPI library */
        retval = PAPI_library_init(PAPI_VER_CURRENT);
        if (retval != PAPI_VER_CURRENT)
            handle_error("Init", "PAPI library init error!\n", retval);

        /* Some more initialization inside the specialization classes */
        localInit();

        /* Prepare Events */
        static_assert(std::conjunction<std::is_integral<PapiCodes>...>(), "All parameters to Init must be of integral type");
        int args[]{eventcodes...}; // unpack the parameter pack into an array
        for (auto eventcode : args)
            AddEvent(eventcode);
    }

protected:
    int retval; // last PAPI return code, shared by the helper calls above

    /* Hook for subclass-specific (re)initialization; default is a no-op. */
    virtual void localInit() {}

    /* Print an error message and exit the process (does not return). */
    void handle_error(const char *location, const char *msg, const int retval = PAPI_OK)
    {
        if (retval == PAPI_OK)
            fprintf(stderr, "PAPI ERROR in %s: %s\n", location, msg);
        else
            fprintf(stderr, "PAPI ERROR (Code %d) in %s: %s\n", retval, location, msg);
        exit(1);
    }

    /* Print a warning message (identifier keeps its historical typo). */
    void issue_waring(const char *location, const char *msg, const int retval = PAPI_OK)
    {
        if (retval == PAPI_OK)
            fprintf(stderr, "PAPI WARNING in %s: %s\n", location, msg);
        else
            fprintf(stderr, "PAPI WARNING (Code %d) in %s: %s\n", retval, location, msg);
    }

    /* Print results: one line per event, then a machine-readable header
       line ("@%% ...") and value line ("@%@ ..."). */
    void print(const std::vector<int> &events, const long long *values)
    {
        for (auto eventCode : events)
            std::cout << getDescription(eventCode) << ": " << GetResult(eventCode) << std::endl;

        /* Print Headers: first word of each description, truncated to 20
           chars.  NOTE(review): j < 20 is checked after description[j] is
           read; safe only because every description exceeds 20 chars. */
        std::cout << "@%% ";
        for (auto eventCode : events)
        {
            auto description = getDescription(eventCode);
            for (int j = 0; description[j] != '\0' && description[j] != ' ' && j < 20; j++)
                std::cout << description[j];
            std::cout << " ";
        }
        std::cout << std::endl;

        /* Print results */
        int count = events.size();
        std::cout << "@%@ ";
        for (int i = 0; i < count; i++)
            std::cout << values[i] << " ";
        std::cout << std::endl;
    }

    /* Get description text of a preset PAPI event code (static lookup). */
    const char *getDescription(const int eventCode)
    {
        switch (eventCode)
        {
        case PAPI_L1_DCM: return "PAPI_L1_DCM (Level 1 data cache misses)";
        case PAPI_L1_ICM: return "PAPI_L1_ICM (Level 1 instruction cache misses)";
        case PAPI_L2_DCM: return "PAPI_L2_DCM (Level 2 data cache misses)";
        case PAPI_L2_ICM: return "PAPI_L2_ICM (Level 2 instruction cache misses)";
        case PAPI_L3_DCM: return "PAPI_L3_DCM (Level 3 data cache misses)";
        case PAPI_L3_ICM: return "PAPI_L3_ICM (Level 3 instruction cache misses)";
        case PAPI_L1_TCM: return "PAPI_L1_TCM (Level 1 total cache misses)";
        case PAPI_L2_TCM: return "PAPI_L2_TCM (Level 2 total cache misses)";
        case PAPI_L3_TCM: return "PAPI_L3_TCM (Level 3 total cache misses)";
        case PAPI_CA_SNP: return "PAPI_CA_SNP (Snoops)";
        case PAPI_CA_SHR: return "PAPI_CA_SHR (Request for shared cache line (SMP))";
        case PAPI_CA_CLN: return "PAPI_CA_CLN (Request for clean cache line (SMP))";
        case PAPI_CA_INV: return "PAPI_CA_INV (Request for cache line Invalidation (SMP))";
        case PAPI_CA_ITV: return "PAPI_CA_ITV (Request for cache line Intervention (SMP))";
        case PAPI_L3_LDM: return "PAPI_L3_LDM (Level 3 load misses)";
        case PAPI_L3_STM: return "PAPI_L3_STM (Level 3 store misses)";
        case PAPI_BRU_IDL: return "PAPI_BRU_IDL (Cycles branch units are idle)";
        case PAPI_FXU_IDL: return "PAPI_FXU_IDL (Cycles integer units are idle)";
        case PAPI_FPU_IDL: return "PAPI_FPU_IDL (Cycles floating point units are idle)";
        case PAPI_LSU_IDL: return "PAPI_LSU_IDL (Cycles load/store units are idle)";
        case PAPI_TLB_DM: return "PAPI_TLB_DM (Data translation lookaside buffer misses)";
        case PAPI_TLB_IM: return "PAPI_TLB_IM (Instr translation lookaside buffer misses)";
        case PAPI_TLB_TL: return "PAPI_TLB_TL (Total translation lookaside buffer misses)";
        case PAPI_L1_LDM: return "PAPI_L1_LDM (Level 1 load misses)";
        case PAPI_L1_STM: return "PAPI_L1_STM (Level 1 store misses)";
        case PAPI_L2_LDM: return "PAPI_L2_LDM (Level 2 load misses)";
        case PAPI_L2_STM: return "PAPI_L2_STM (Level 2 store misses)";
        case PAPI_BTAC_M: return "PAPI_BTAC_M (BTAC miss)";
        case PAPI_PRF_DM: return "PAPI_PRF_DM (Prefetch data instruction caused a miss)";
        case PAPI_L3_DCH: return "PAPI_L3_DCH (Level 3 Data Cache Hit)";
        case PAPI_TLB_SD: return "PAPI_TLB_SD (Xlation lookaside buffer shootdowns (SMP))";
        case PAPI_CSR_FAL: return "PAPI_CSR_FAL (Failed store conditional instructions)";
        case PAPI_CSR_SUC: return "PAPI_CSR_SUC (Successful store conditional instructions)";
        case PAPI_CSR_TOT: return "PAPI_CSR_TOT (Total store conditional instructions)";
        case PAPI_MEM_SCY: return "PAPI_MEM_SCY (Cycles Stalled Waiting for Memory Access)";
        case PAPI_MEM_RCY: return "PAPI_MEM_RCY (Cycles Stalled Waiting for Memory Read)";
        case PAPI_MEM_WCY: return "PAPI_MEM_WCY (Cycles Stalled Waiting for Memory Write)";
        case PAPI_STL_ICY: return "PAPI_STL_ICY (Cycles with No Instruction Issue)";
        case PAPI_FUL_ICY: return "PAPI_FUL_ICY (Cycles with Maximum Instruction Issue)";
        case PAPI_STL_CCY: return "PAPI_STL_CCY (Cycles with No Instruction Completion)";
        case PAPI_FUL_CCY: return "PAPI_FUL_CCY (Cycles with Maximum Instruction Completion)";
        case PAPI_HW_INT: return "PAPI_HW_INT (Hardware interrupts)";
        case PAPI_BR_UCN: return "PAPI_BR_UCN (Unconditional branch instructions executed)";
        case PAPI_BR_CN: return "PAPI_BR_CN (Conditional branch instructions executed)";
        case PAPI_BR_TKN: return "PAPI_BR_TKN (Conditional branch instructions taken)";
        case PAPI_BR_NTK: return "PAPI_BR_NTK (Conditional branch instructions not taken)";
        case PAPI_BR_MSP: return "PAPI_BR_MSP (Conditional branch instructions mispred)";
        case PAPI_BR_PRC: return "PAPI_BR_PRC (Conditional branch instructions corr. pred)";
        case PAPI_FMA_INS: return "PAPI_FMA_INS (FMA instructions completed)";
        case PAPI_TOT_IIS: return "PAPI_TOT_IIS (Total instructions issued)";
        case PAPI_TOT_INS: return "PAPI_TOT_INS (Total instructions executed)";
        case PAPI_INT_INS: return "PAPI_INT_INS (Integer instructions executed)";
        case PAPI_FP_INS: return "PAPI_FP_INS (Floating point instructions executed)";
        case PAPI_LD_INS: return "PAPI_LD_INS (Load instructions executed)";
        case PAPI_SR_INS: return "PAPI_SR_INS (Store instructions executed)";
        case PAPI_BR_INS: return "PAPI_BR_INS (Total branch instructions executed)";
        case PAPI_VEC_INS: return "PAPI_VEC_INS (Vector/SIMD instructions executed (could include integer))";
        case PAPI_RES_STL: return "PAPI_RES_STL (Cycles processor is stalled on resource)";
        case PAPI_FP_STAL: return "PAPI_FP_STAL (Cycles any FP units are stalled)";
        case PAPI_TOT_CYC: return "PAPI_TOT_CYC (Total cycles executed)";
        case PAPI_LST_INS: return "PAPI_LST_INS (Total load/store inst. executed)";
        case PAPI_SYC_INS: return "PAPI_SYC_INS (Sync. inst. executed)";
        case PAPI_L1_DCH: return "PAPI_L1_DCH (L1 D Cache Hit)";
        case PAPI_L2_DCH: return "PAPI_L2_DCH (L2 D Cache Hit)";
        case PAPI_L1_DCA: return "PAPI_L1_DCA (L1 D Cache Access)";
        case PAPI_L2_DCA: return "PAPI_L2_DCA (L2 D Cache Access)";
        case PAPI_L3_DCA: return "PAPI_L3_DCA (L3 D Cache Access)";
        case PAPI_L1_DCR: return "PAPI_L1_DCR (L1 D Cache Read)";
        case PAPI_L2_DCR: return "PAPI_L2_DCR (L2 D Cache Read)";
        case PAPI_L3_DCR: return "PAPI_L3_DCR (L3 D Cache Read)";
        case PAPI_L1_DCW: return "PAPI_L1_DCW (L1 D Cache Write)";
        case PAPI_L2_DCW: return "PAPI_L2_DCW (L2 D Cache Write)";
        case PAPI_L3_DCW: return "PAPI_L3_DCW (L3 D Cache Write)";
        case PAPI_L1_ICH: return "PAPI_L1_ICH (L1 instruction cache hits)";
        case PAPI_L2_ICH: return "PAPI_L2_ICH (L2 instruction cache hits)";
        case PAPI_L3_ICH: return "PAPI_L3_ICH (L3 instruction cache hits)";
        case PAPI_L1_ICA: return "PAPI_L1_ICA (L1 instruction cache accesses)";
        case PAPI_L2_ICA: return "PAPI_L2_ICA (L2 instruction cache accesses)";
        case PAPI_L3_ICA: return "PAPI_L3_ICA (L3 instruction cache accesses)";
        case PAPI_L1_ICR: return "PAPI_L1_ICR (L1 instruction cache reads)";
        case PAPI_L2_ICR: return "PAPI_L2_ICR (L2 instruction cache reads)";
        case PAPI_L3_ICR: return "PAPI_L3_ICR (L3 instruction cache reads)";
        case PAPI_L1_ICW: return "PAPI_L1_ICW (L1 instruction cache writes)";
        case PAPI_L2_ICW: return "PAPI_L2_ICW (L2 instruction cache writes)";
        case PAPI_L3_ICW: return "PAPI_L3_ICW (L3 instruction cache writes)";
        case PAPI_L1_TCH: return "PAPI_L1_TCH (L1 total cache hits)";
        case PAPI_L2_TCH: return "PAPI_L2_TCH (L2 total cache hits)";
        case PAPI_L3_TCH: return "PAPI_L3_TCH (L3 total cache hits)";
        case PAPI_L1_TCA: return "PAPI_L1_TCA (L1 total cache accesses)";
        case PAPI_L2_TCA: return "PAPI_L2_TCA (L2 total cache accesses)";
        case PAPI_L3_TCA: return "PAPI_L3_TCA (L3 total cache accesses)";
        case PAPI_L1_TCR: return "PAPI_L1_TCR (L1 total cache reads)";
        case PAPI_L2_TCR: return "PAPI_L2_TCR (L2 total cache reads)";
        case PAPI_L3_TCR: return "PAPI_L3_TCR (L3 total cache reads)";
        case PAPI_L1_TCW: return "PAPI_L1_TCW (L1 total cache writes)";
        case PAPI_L2_TCW: return "PAPI_L2_TCW (L2 total cache writes)";
        case PAPI_L3_TCW: return "PAPI_L3_TCW (L3 total cache writes)";
        case PAPI_FML_INS: return "PAPI_FML_INS (FM ins)";
        case PAPI_FAD_INS: return "PAPI_FAD_INS (FA ins)";
        case PAPI_FDV_INS: return "PAPI_FDV_INS (FD ins)";
        case PAPI_FSQ_INS: return "PAPI_FSQ_INS (FSq ins)";
        case PAPI_FNV_INS: return "PAPI_FNV_INS (Finv ins)";
        case PAPI_FP_OPS: return "PAPI_FP_OPS (Floating point operations executed)";
        case PAPI_SP_OPS: return "PAPI_SP_OPS (Floating point operations executed: optimized to count scaled single precision vector operations)";
        case PAPI_DP_OPS: return "PAPI_DP_OPS (Floating point operations executed: optimized to count scaled double precision vector operations)";
        case PAPI_VEC_SP: return "PAPI_VEC_SP (Single precision vector/SIMD instructions)";
        case PAPI_VEC_DP: return "PAPI_VEC_DP (Double precision vector/SIMD instructions)";
        case PAPI_REF_CYC: return "PAPI_REF_CYC (Reference clock cycles)";
        default: return "UNKNOWN CODE";
        }
    }
};

/**
 * PapiWrapper class for Sequential use
 *
 * It is discouraged to use this class directly but rather through the utility functions
 * inside the PAPIW namespace.
 */
class PapiWrapperSingle : public PapiWrapper
{
private:
    static int const papiMaxAllowedCounters = 20; // capacity of the fixed result buffers below
    int eventSet = PAPI_NULL;
    bool running = false;
    long long buffer[papiMaxAllowedCounters]; // raw counts of the most recent Start/Stop interval
    long long values[papiMaxAllowedCounters]; // counts accumulated across all Start/Stop intervals
    std::vector<int> events;                  // event codes, in the order they were added

public:
    PapiWrapperSingle() : ThreadID(0) {}
    PapiWrapperSingle(const unsigned long threadID) : ThreadID(threadID) {}
    /* NOTE(review): the event set is never destroyed via
       PAPI_cleanup_eventset/PAPI_destroy_eventset — confirm intentional. */
    ~PapiWrapperSingle() {}

    const unsigned long ThreadID; // thread this instance counts for (informational)

    /* Add an event to be counted.  Creates the event set lazily on the
       first call; an unsupported event only produces a warning and is
       skipped, so `events` holds exactly the successfully added codes. */
    void AddEvent(const int eventCode) override
    {
        if (running)
            handle_error("AddEvent", "You can't add events while Papi is running\n");

        if (events.size() >= papiMaxAllowedCounters)
            handle_error("AddEvent", "Event count limit exceeded. Check papiMaxAllowedCounters\n");

        if (eventSet == PAPI_NULL)
        {
            retval = PAPI_create_eventset(&eventSet);
            if (retval != PAPI_OK)
                handle_error("AddEvent", "Could not create event set", retval);
        }

        retval = PAPI_add_event(eventSet, eventCode);
        if (retval != PAPI_OK)
            issue_waring("AddEvent. Could not add", getDescription(eventCode), retval);
        else
            events.push_back(eventCode);
    }

    /* Start the counter */
    void Start() override
    {
        if (running)
            handle_error("Start", "You can not start an already running PAPI instance");

        retval = PAPI_start(eventSet);
        if (retval != PAPI_OK)
            handle_error("Start", "Could not start PAPI counters", retval);

        running = true;
    }

    /* Stop the counter and accumulate the interval's counts into values,
       so repeated Start/Stop pairs sum up until Reset(). */
    void Stop() override
    {
        if (!running)
            handle_error("Stop", "You can not stop an already stopped Papi instance");

        retval = PAPI_stop(eventSet, buffer);
        if (retval != PAPI_OK)
            handle_error("Stop", "Could not stop PAPI counters", retval);

        int count = events.size();
        for (int i = 0; i < count; i++)
            values[i] += buffer[i];

        running = false;
    }

    /* Get the accumulated result of a specific event (exits if the event
       was never successfully added). */
    long long GetResult(const int eventCode) override
    {
        if (running)
            handle_error("GetResult", "You can't get results while Papi is running\n");

        auto indexInResult = std::find(events.begin(), events.end(), eventCode);
        if (indexInResult == events.end())
            handle_error("GetResult", "The event is not supported or has not been added to the set");

        return values[indexInResult - events.begin()];
    }

    /* Reset the accumulated counter values (registered events are kept). */
    void Reset()
    {
        if (running)
            handle_error("Reset", "You can't reset while Papi is running\n");
        localInit();
    }

    /* Getter method for the running state */
    bool IsRunning() { return running; }

    /* Raw access to the accumulated values array (indexed like `events`) */
    const long long *GetValues() { return values; }

    /* Print the results */
    void Print() override
    {
        if (running)
            handle_error("Print", "You can not print while Papi is running. Stop the counters first!");

        std::cout << "PAPIW Single PapiWrapper instance report:" << std::endl;
        print(events, values);
    }

protected:
    /* Initialize (zero) the values array */
    void localInit() override
    {
        for (int i = 0; i < papiMaxAllowedCounters; i++)
            values[i] = 0;
    }
};

#ifdef _OPENMP
/**
 * PapiWrapper class for Parallel use
 *
 * It is discouraged to use this class directly but rather through the utility functions
 * inside the PAPIW namespace.
*/
class PapiWrapperParallel : public PapiWrapper
{
private:
    // Per-thread counter instance: each OMP thread owns its own copy via threadprivate.
    inline static PapiWrapperSingle *localPapi;
#pragma omp threadprivate(localPapi)
    std::vector<int> events;                // registered event codes (shared across threads)
    std::vector<long long> values;          // accumulated totals, parallel to 'events'
    int numRunningThreads = 0; //0 is none running
    bool startedFromParallelRegion = false; // remembers whether Start() was issued inside a region

public:
    PapiWrapperParallel() {}
    ~PapiWrapperParallel()
    {
        std::cout << "Destructing Local Papis" << std::endl;
        checkNotInParallelRegion("DESTRUCTOR");
        // Each thread deletes its own threadprivate instance.
#pragma omp parallel
        {
            delete localPapi;
            localPapi = nullptr;
        }
    }

    /* Register events to be counted */
    void AddEvent(const int eventCode) override
    {
        checkNotInParallelRegion("ADD_EVENT");
        checkNoneRunning("ADD_EVENT");
        events.push_back(eventCode);
        values.push_back(0);
    }

    /* Start the counters */
    void Start() override
    {
        if (isInParallelRegion())
        {
            // Already inside a team: each thread starts its own counters.
#pragma omp single
            checkNoneRunning("START");
            startedFromParallelRegion = true;
            start();
        }
        else
        {
            checkNoneRunning("START");
            startedFromParallelRegion = false;
            // Spawn a team so every thread gets its own counter instance.
#pragma omp parallel
            {
                start();
            }
        }
    }

    /* Stop the counters */
    void Stop() override
    {
        checkNumberOfThreads("STOP");
        if (isInParallelRegion())
        {
            if (!startedFromParallelRegion)
                issue_waring("Stop", "The Papi Counters have not been started in a parallel Region. You should not stop them in a parallel region, however, the results should be fine.");
            stop();
            // All threads must have accumulated their values before the count is reset.
#pragma omp barrier
            numRunningThreads = 0;
        }
        else
        {
            if (startedFromParallelRegion)
                issue_waring("Stop", "The Papi Counters have been started in a parallel Region. You should stop them in the same parallel region or move Start/Stop completely out of the parallel region.");
#pragma omp parallel
            {
                stop();
            }
            numRunningThreads = 0;
        }
    }

    /* Get the result of a specific event */
    long long GetResult(const int eventCode) override
    {
        checkNoneRunning("GET_RESULT");
        auto indexInResult = std::find(events.begin(), events.end(), eventCode);
        if (indexInResult == events.end())
            handle_error("GetResult", "The event is not supported or has not been added to the set");
        return values[indexInResult - events.begin()];
    }

    /* Print the values */
    void Print() override
    {
        checkNoneRunning("PRINT");
#pragma omp single
        {
            std::cout << "PAPIW Parallel PapiWrapper instance report:" << std::endl;
            print(events, values.data());
        }
    }

    /* Reset the values */
    void Reset()
    {
        checkNoneRunning("RESET");
#pragma omp single
        std::fill(values.begin(), values.end(), 0);
    }

protected:
    /* Initialize the instance */
    void localInit() override
    {
        checkNotInParallelRegion("INIT");
        // NOTE(review): pthread_self is handed to PAPI as the thread-id function,
        // which assumes OpenMP runs on pthreads — confirm on non-pthread platforms.
        retval = PAPI_thread_init(pthread_self);
        if (retval != PAPI_OK)
            handle_error("localInit in PapiWrapperParallel", "Could not initialize OMP Support", retval);
        else
            std::cout << "Papi Parallel support enabled" << std::endl;
    }

    /* Helper function to start the counters */
    void start()
    {
        // One thread records the team size; the implicit barrier of 'single'
        // makes it visible to the whole team before counting starts.
#pragma omp single
        numRunningThreads = omp_get_num_threads();
        retval = PAPI_register_thread();
        if (retval != PAPI_OK)
            handle_error("Start", "Couldn't register thread", retval);
        localPapi = new PapiWrapperSingle(pthread_self());
        for (auto eventCode : events)
            localPapi->AddEvent(eventCode);
        localPapi->Start();
    }

    /* Helper function to stop the counters and accumulate the values to total */
    void stop()
    {
        localPapi->Stop();
        /*Check that same thread is used since starting the counters*/
        if (PAPI_thread_id() != localPapi->ThreadID)
            handle_error("Stop", "Invalid State: The Thread Ids differs from initialization!\nApparently, new threads were use without reassigning the Papi counters. Please Start and Stop more often to avoid this error.");
        int eventCount = events.size();
        for (int i = 0; i < eventCount; i++)
        {
            auto localVal = localPapi->GetResult(events[i]);
            // 'values' is shared by the whole team; accumulate atomically.
#pragma omp atomic
            values[i] += localVal;
        }
        delete localPapi;
        localPapi = nullptr;
        retval = PAPI_unregister_thread();
        if (retval != PAPI_OK)
            handle_error("Stop", "Couldn't unregister thread", retval);
    }

    /* Returns the current OMP team size */
    int GetNumThreads()
    {
        if (isInParallelRegion())
            return omp_get_num_threads();
        // Outside a region: open a throwaway team just to ask its size.
        int count;
#pragma omp parallel
        {
#pragma omp master
            count = omp_get_num_threads();
        }
        return count;
    }

    /* Returns true if it is called in a parallel region or false otherwise */
    bool isInParallelRegion()
    {
        return omp_get_level() != 0;
    }

    /* Check that the current thread team size is not larger then it was last registered or exit with an error otherwise */
    void checkNumberOfThreads(const char *actionMsg)
    {
        /* State check */
        if (GetNumThreads() != numRunningThreads)
            handle_error(actionMsg, "The OMP teamsize is different than indicated in Start!");
    }

    /* Check that this method is not called from a parallel context or exit with an error otherwise */
    void checkNotInParallelRegion(const char *actionMsg)
    {
        if (isInParallelRegion())
            handle_error(actionMsg, "You may not perform this operation from a parallel region");
    }

    /* Check that no Papi Counter is running or exit with an error otherwise */
    void checkNoneRunning(const char *actionMsg)
    {
        if (numRunningThreads)
            handle_error(actionMsg, "You can not perform this action while Papi is running. Stop the counters first!");
    }
};
#endif
#endif
#endif
simd-clone-4.h
template <int N> struct S { int s; #pragma omp declare simd notinbranch int f0 (int x); #pragma omp declare simd notinbranch uniform(this) int f1 (int x); #pragma omp declare simd notinbranch linear(this:sizeof(this)/sizeof(this)) int f2 (int x); }; template <int N> struct T { int t[64]; #pragma omp declare simd aligned(this:32) uniform(this) linear(x) int f3 (int x); };
hmacMD5_fmt_plug.c
/* * This software is Copyright (c) 2010 bartavelle, <bartavelle at bandecon.com> * and (c) magnum 2011-2015, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_hmacMD5; #elif FMT_REGISTERS_H john_register_one(&fmt_hmacMD5); #else #include <string.h> #include "arch.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 2048 // tuned for i7 using SSE2 and w/o HT #endif #endif #include "misc.h" #include "common.h" #include "formats.h" #include "md5.h" #include "aligned.h" #include "simd-intrinsics.h" #include "base64_convert.h" #include "memdbg.h" #define FORMAT_LABEL "HMAC-MD5" #define FORMAT_NAME "" #ifdef SIMD_COEF_32 #define MD5_N (SIMD_PARA_MD5 * SIMD_COEF_32) #endif #define ALGORITHM_NAME "password is key, MD5 " MD5_ALGORITHM_NAME #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 125 #define PAD_SIZE 64 #define PAD_SIZE_W (PAD_SIZE/4) #define BINARY_SIZE 16 #define BINARY_ALIGN sizeof(ARCH_WORD_32) #ifdef SIMD_COEF_32 #define SALT_LIMBS 3 /* 3 limbs, 183 bytes */ #define SALT_LENGTH (SALT_LIMBS * PAD_SIZE - 9) #define SALT_ALIGN MEM_ALIGN_SIMD #else #define SALT_LENGTH 1023 #define SALT_ALIGN 1 #endif #define CIPHERTEXT_LENGTH (2 * SALT_LENGTH + 2 * BINARY_SIZE) #define HEXCHARS "0123456789abcdef" #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT MD5_N #define MAX_KEYS_PER_CRYPT MD5_N #define GETPOS(i, index) ((index & (SIMD_COEF_32 - 1)) * 4 + ((i&63) & (0xffffffff - 3)) * SIMD_COEF_32 + ((i&63) & 3) + (unsigned int)index/SIMD_COEF_32 * PAD_SIZE * SIMD_COEF_32) #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static struct fmt_tests tests[] = { {"what do ya want for nothing?#750c783e6ab0b503eaa86e310a5db738", "Jefe"}, {"YT1m11GDMm3oze0EdqO3FZmATSrxhquB#6c97850b296b34719b7cea5c0c751e22", ""}, 
{"2shXeqDlLdZ2pSMc0CBHfTyA5a9TKuSW#dfeb02c6f8a9ce89b554be60db3a2333", "magnum"}, {"#74e6f7298a9c2d168935f58c001bad88", ""}, {"The quick brown fox jumps over the lazy dog#80070713463e7749b90c2dc24911e275", "key"}, {"Beppe Grillo#F8457C3046C587BBCBD6D7036BA42C81", "Io credo nella reincarnazione e sono di Genova; per cui ho fatto testamento e mi sono lasciato tutto a me."}, {"$cram_md5$PG5vLXJlcGx5QGhhc2hjYXQubmV0Pg==$dXNlciA0NGVhZmQyMmZlNzY2NzBmNmIyODc5MDgxYTdmNWY3MQ==", "hashcat"}, {"MEaEObR2JNXgchVn93GLLH1Ud4qTzuC0#9a80bea0acd72231ea043210a173ec7f", "123"}, {"d2BbCbiSXTlglEstbFFlrRgPhR1KUa2s#7a553738bc4997e656329c1b1ef99e4f", "123456789"}, {"dBTmX1AdmnWyVkMKp7BEt4O3eBktdN2S#f6af0afd4f397504c3bfa3836bc04a0f", "passWOrd"}, {"0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789#050a9dee01b2302914b2a78346721d9b", "magnum"}, {"123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123#e4d0097fdc52f6fc50545d832784232d", "MaxLenSaltUsed"}, {NULL} }; #ifdef SIMD_COEF_32 static unsigned char *crypt_key; static unsigned char *ipad, *prep_ipad; static unsigned char *opad, *prep_opad; typedef struct cur_salt_t { unsigned char salt[SALT_LIMBS][PAD_SIZE * MAX_KEYS_PER_CRYPT]; int salt_len; } cur_salt_t; static cur_salt_t *cur_salt; static int bufsize; #define SALT_SIZE sizeof(cur_salt_t) #else static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE / sizeof(ARCH_WORD_32)]; static unsigned char (*ipad)[PAD_SIZE]; static unsigned char (*opad)[PAD_SIZE]; static unsigned char cur_salt[SALT_LENGTH+1]; static MD5_CTX *ipad_ctx; static MD5_CTX *opad_ctx; #define SALT_SIZE sizeof(cur_salt) #endif static char (*saved_plain)[PLAINTEXT_LENGTH + 1]; static int new_keys; #ifdef SIMD_COEF_32 static void clear_keys(void) { memset(ipad, 0x36, bufsize); memset(opad, 0x5C, bufsize); } #endif static void init(struct fmt_main *self) { 
#ifdef SIMD_COEF_32 unsigned int i; #endif #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif #ifdef SIMD_COEF_32 bufsize = sizeof(*opad) * self->params.max_keys_per_crypt * PAD_SIZE; crypt_key = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD); ipad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD); opad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD); prep_ipad = mem_calloc_align(self->params.max_keys_per_crypt, BINARY_SIZE, MEM_ALIGN_SIMD); prep_opad = mem_calloc_align(self->params.max_keys_per_crypt, BINARY_SIZE, MEM_ALIGN_SIMD); for (i = 0; i < self->params.max_keys_per_crypt; ++i) { crypt_key[GETPOS(BINARY_SIZE, i)] = 0x80; ((unsigned int*)crypt_key)[14 * SIMD_COEF_32 + (i&(SIMD_COEF_32-1)) + (i/SIMD_COEF_32) * PAD_SIZE_W * SIMD_COEF_32] = (BINARY_SIZE + PAD_SIZE) << 3; } clear_keys(); #else crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_key)); ipad = mem_calloc(self->params.max_keys_per_crypt, sizeof(*ipad)); opad = mem_calloc(self->params.max_keys_per_crypt, sizeof(*opad)); ipad_ctx = mem_calloc(self->params.max_keys_per_crypt, sizeof(*ipad_ctx)); opad_ctx = mem_calloc(self->params.max_keys_per_crypt, sizeof(*opad_ctx)); #endif saved_plain = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_plain)); } static void done(void) { MEM_FREE(saved_plain); #ifdef SIMD_COEF_32 MEM_FREE(prep_opad); MEM_FREE(prep_ipad); #else MEM_FREE(opad_ctx); MEM_FREE(ipad_ctx); #endif MEM_FREE(opad); MEM_FREE(ipad); MEM_FREE(crypt_key); } /* Convert from Base64 format with tag to our legacy format */ static char *prepare(char *split_fields[10], struct fmt_main *self) { char *p = split_fields[1]; if (!strncmp(p, "$cram_md5$", 10)) { static char out[256]; int len, len2; char *d, *o = out; p += 10; memset(out, 0, sizeof(out)); if (!(d = strchr(p, '$'))) return split_fields[1]; len = base64_convert(p, e_b64_mime, (int)(d - p - 1), o, e_b64_raw, 
sizeof(out), flg_Base64_MIME_TRAIL_EQ, 0); if (len > sizeof(out)-2) return split_fields[1]; o += len; *o++ = '#'; d++; len2 = base64_convert(d, e_b64_mime, strlen(d), o, e_b64_raw, sizeof(out) - len - 2, flg_Base64_MIME_TRAIL_EQ, 0); if (len2 > sizeof(out) - len - 3) return split_fields[1]; len = len2; if (!(p = strchr(o, ' '))) return split_fields[1]; p++; if (p-o >= len) return split_fields[1]; memmove(o, p, len - (p - o) + 1); if (strlen(o) == BINARY_SIZE * 2) return out; } return p; } static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[CIPHERTEXT_LENGTH + 1]; if (strstr(ciphertext, "$SOURCE_HASH$")) return ciphertext; strnzcpy(out, ciphertext, CIPHERTEXT_LENGTH + 1); strlwr(strrchr(out, '#')); return out; } static int valid(char *ciphertext, struct fmt_main *self) { int pos, i; char *p; if (!strncmp(ciphertext, "$cram_md5$", 10)) { char *f[10]; f[1] = ciphertext; ciphertext = prepare(f, self); } p = strrchr(ciphertext, '#'); // allow # in salt if (!p || p > &ciphertext[strlen(ciphertext) - 1]) return 0; i = (int)(p - ciphertext); if (i > SALT_LENGTH) return 0; pos = i + 1; if (strlen(ciphertext+pos) != BINARY_SIZE * 2) return 0; for (i = pos; i < BINARY_SIZE*2+pos; i++) { if (!((('0' <= ciphertext[i])&&(ciphertext[i] <= '9')) || (('a' <= ciphertext[i])&&(ciphertext[i] <= 'f')) || (('A' <= ciphertext[i])&&(ciphertext[i] <= 'F')))) return 0; } return 1; } static void set_salt(void *salt) { #ifdef SIMD_COEF_32 cur_salt = salt; #else strcpy((char*)cur_salt, (char*)salt); #endif } static void set_key(char *key, int index) { int len; #ifdef SIMD_COEF_32 ARCH_WORD_32 *ipadp = (ARCH_WORD_32*)&ipad[GETPOS(0, index)]; ARCH_WORD_32 *opadp = (ARCH_WORD_32*)&opad[GETPOS(0, index)]; const ARCH_WORD_32 *keyp = (ARCH_WORD_32*)key; unsigned int temp; len = strlen(key); memcpy(saved_plain[index], key, len); saved_plain[index][len] = 0; if (len > PAD_SIZE) { unsigned char k0[BINARY_SIZE]; MD5_CTX ctx; int i; MD5_Init(&ctx); MD5_Update(&ctx, 
key, len); MD5_Final(k0, &ctx); keyp = (unsigned int*)k0; for(i = 0; i < BINARY_SIZE / 4; i++, ipadp += SIMD_COEF_32, opadp += SIMD_COEF_32) { temp = *keyp++; *ipadp ^= temp; *opadp ^= temp; } } else while((unsigned char)(temp = *keyp++)) { if (!(temp & 0xff00) || !(temp & 0xff0000)) { *ipadp ^= (unsigned short)temp; *opadp ^= (unsigned short)temp; break; } *ipadp ^= temp; *opadp ^= temp; if (!(temp & 0xff000000)) break; ipadp += SIMD_COEF_32; opadp += SIMD_COEF_32; } #else int i; len = strlen(key); memcpy(saved_plain[index], key, len); saved_plain[index][len] = 0; memset(ipad[index], 0x36, PAD_SIZE); memset(opad[index], 0x5C, PAD_SIZE); if (len > PAD_SIZE) { MD5_CTX ctx; unsigned char k0[BINARY_SIZE]; MD5_Init(&ctx); MD5_Update(&ctx, key, len); MD5_Final(k0, &ctx); len = BINARY_SIZE; for(i = 0; i < len; i++) { ipad[index][i] ^= k0[i]; opad[index][i] ^= k0[i]; } } else for(i = 0; i < len; i++) { ipad[index][i] ^= key[i]; opad[index][i] ^= key[i]; } #endif new_keys = 1; } static char *get_key(int index) { return saved_plain[index]; } static int cmp_all(void *binary, int count) { #ifdef SIMD_COEF_32 unsigned int x, y = 0; for(; y < (unsigned int)(count + SIMD_COEF_32 - 1) / SIMD_COEF_32; y++) for(x = 0; x < SIMD_COEF_32; x++) { // NOTE crypt_key is in input format (PAD_SIZE * SIMD_COEF_32) if (((ARCH_WORD_32*)binary)[0] == ((ARCH_WORD_32*)crypt_key)[x + y * SIMD_COEF_32 * PAD_SIZE_W]) return 1; } return 0; #else int index = 0; #if defined(_OPENMP) || (MAX_KEYS_PER_CRYPT > 1) for (index = 0; index < count; index++) #endif if (((ARCH_WORD_32*)binary)[0] == crypt_key[index][0]) return 1; return 0; #endif } static int cmp_one(void *binary, int index) { #ifdef SIMD_COEF_32 int i; for(i = 0; i < (BINARY_SIZE/4); i++) // NOTE crypt_key is in input format (PAD_SIZE * SIMD_COEF_32) if (((ARCH_WORD_32*)binary)[i] != ((ARCH_WORD_32*)crypt_key)[i * SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32 * PAD_SIZE_W * SIMD_COEF_32]) return 0; return 1; #else 
return !memcmp(binary, crypt_key[index], BINARY_SIZE); #endif } static int cmp_exact(char *source, int index) { return (1); } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #if _OPENMP #pragma omp parallel for for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { #ifdef SIMD_COEF_32 int i; if (new_keys) { SIMDmd5body(&ipad[index * PAD_SIZE], (unsigned int*)&prep_ipad[index * BINARY_SIZE], NULL, SSEi_MIXED_IN); SIMDmd5body(&opad[index * PAD_SIZE], (unsigned int*)&prep_opad[index * BINARY_SIZE], NULL, SSEi_MIXED_IN); } SIMDmd5body(cur_salt->salt[0], (unsigned int*)&crypt_key[index * PAD_SIZE], (unsigned int*)&prep_ipad[index * BINARY_SIZE], SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT); for (i = 1; i <= (cur_salt->salt_len + 8) / PAD_SIZE; i++) { SIMDmd5body(cur_salt->salt[i], (unsigned int*)&crypt_key[index * PAD_SIZE], (unsigned int*)&crypt_key[index * PAD_SIZE], SSEi_MIXED_IN|SSEi_RELOAD_INP_FMT|SSEi_OUTPUT_AS_INP_FMT); } SIMDmd5body(&crypt_key[index * PAD_SIZE], (unsigned int*)&crypt_key[index * PAD_SIZE], (unsigned int*)&prep_opad[index * BINARY_SIZE], SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT); #else MD5_CTX ctx; if (new_keys) { MD5_Init(&ipad_ctx[index]); MD5_Update(&ipad_ctx[index], ipad[index], PAD_SIZE); MD5_Init(&opad_ctx[index]); MD5_Update(&opad_ctx[index], opad[index], PAD_SIZE); } memcpy(&ctx, &ipad_ctx[index], sizeof(ctx)); MD5_Update(&ctx, cur_salt, strlen((char*)cur_salt)); MD5_Final((unsigned char*) crypt_key[index], &ctx); memcpy(&ctx, &opad_ctx[index], sizeof(ctx)); MD5_Update(&ctx, crypt_key[index], BINARY_SIZE); MD5_Final((unsigned char*) crypt_key[index], &ctx); #endif } new_keys = 0; return count; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; ARCH_WORD_32 dummy; } buf; unsigned char *out = buf.c; char *p; int i; // allow # in salt p = strrchr(ciphertext, '#') + 1; for (i = 0; i < BINARY_SIZE; i++) { out[i] = 
(atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return (void*)out; } static void *get_salt(char *ciphertext) { static unsigned char salt[SALT_LENGTH+1]; int len; #ifdef SIMD_COEF_32 unsigned int i = 0; static JTR_ALIGN(MEM_ALIGN_SIMD) cur_salt_t cur_salt; int salt_len = 0; #endif // allow # in salt len = strrchr(ciphertext, '#') - ciphertext; memset(salt, 0, sizeof(salt)); memcpy(salt, ciphertext, len); #ifdef SIMD_COEF_32 memset(&cur_salt, 0, sizeof(cur_salt)); while(((unsigned char*)salt)[salt_len]) { for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) cur_salt.salt[salt_len / PAD_SIZE][GETPOS(salt_len, i)] = ((unsigned char*)salt)[salt_len]; ++salt_len; } cur_salt.salt_len = salt_len; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { cur_salt.salt[salt_len / PAD_SIZE][GETPOS(salt_len, i)] = 0x80; ((unsigned int*)cur_salt.salt[(salt_len + 8) / PAD_SIZE])[14 * SIMD_COEF_32 + (i&(SIMD_COEF_32-1)) + i/SIMD_COEF_32 * PAD_SIZE_W * SIMD_COEF_32] = (salt_len + PAD_SIZE) << 3; } return &cur_salt; #else return salt; #endif } struct fmt_main fmt_hmacMD5 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD | FMT_SPLIT_UNIFIES_CASE, { NULL }, { NULL }, tests }, { init, done, fmt_default_reset, prepare, valid, split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */ }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, #ifdef SIMD_COEF_32 clear_keys, #else fmt_default_clear_keys, #endif crypt_all, { fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */ }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
omp_parallel_for_ordered_nothreadprivate.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include "omp_testsuite.h" static int last_i = 0; /*! Utility function: returns true if the passed argument is larger than the argument of the last call of this function. */ static int check_i_islarger2(int i) { int islarger; islarger = (i > last_i); last_i = i; return (islarger); } int test_omp_parallel_for_ordered() { int sum; int is_larger; int known_sum; int i; sum = 0; is_larger = 1; last_i = 0; #pragma omp parallel for schedule(static,1) private(i) ordered for (i = 1; i < 100; i++) { int ii = i; #pragma omp ordered { is_larger = check_i_islarger2 (ii) && is_larger; sum = sum + ii; } } known_sum = (99 * 100) / 2; fprintf (stderr," known_sum = %d , sum = %d \n", known_sum, sum); fprintf (stderr," is_larger = %d\n", is_larger); return (known_sum == sum) && is_larger; } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_parallel_for_ordered()) { num_failed++; } } return num_failed; }
gemm.c
#include "gemm.h" #include "utils.h" #include "im2col.h" #include "dark_cuda.h" #include <stdlib.h> #include <stdio.h> #include <math.h> #include <float.h> #include <string.h> #include <stdint.h> #ifdef _WIN32 #include <intrin.h> #endif #if defined(_OPENMP) #include <omp.h> #endif #define TILE_M 4 // 4 ops #define TILE_N 16 // AVX2 = 2 ops * 8 floats #define TILE_K 16 // loop #ifdef __cplusplus #define PUT_IN_REGISTER #else #define PUT_IN_REGISTER register #endif void gemm_bin(int M, int N, int K, float ALPHA, char *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; for(i = 0; i < M; ++i){ for(k = 0; k < K; ++k){ char A_PART = A[i*lda+k]; if(A_PART){ for(j = 0; j < N; ++j){ C[i*ldc+j] += B[k*ldb+j]; } } else { for(j = 0; j < N; ++j){ C[i*ldc+j] -= B[k*ldb+j]; } } } } } float *random_matrix(int rows, int cols) { int i; float* m = (float*)xcalloc(rows * cols, sizeof(float)); for(i = 0; i < rows*cols; ++i){ m[i] = (float)rand()/RAND_MAX; } return m; } void time_random_matrix(int TA, int TB, int m, int k, int n) { float *a; if(!TA) a = random_matrix(m,k); else a = random_matrix(k,m); int lda = (!TA)?k:m; float *b; if(!TB) b = random_matrix(k,n); else b = random_matrix(n,k); int ldb = (!TB)?n:k; float *c = random_matrix(m,n); int i; clock_t start = clock(), end; for(i = 0; i<10; ++i){ gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n); } end = clock(); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf ms\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC); free(a); free(b); free(c); } void gemm(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc) { gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc); } //-------------------------------------------- // XNOR bitwise GEMM for binary neural network //-------------------------------------------- static inline unsigned char xnor(unsigned char a, unsigned char b) { //return a == b; return !(a^b); } // INT-32 static inline uint32_t 
get_bit_int32(uint32_t const*const src, size_t index) { size_t src_i = index / 32; int src_shift = index % 32; unsigned char val = (src[src_i] & (1 << src_shift)) > 0; return val; } static inline uint32_t xnor_int32(uint32_t a, uint32_t b) { return ~(a^b); } static inline uint64_t xnor_int64(uint64_t a, uint64_t b) { return ~(a^b); } static inline uint32_t fill_bit_int32(char src) { if (src == 0) return 0x00000000; else return 0xFFFFFFFF; } static inline uint64_t fill_bit_int64(char src) { if (src == 0) return 0x0000000000000000; else return 0xFFFFFFFFFFFFFFFF; } void binary_int32_printf(uint32_t src) { int i; for (i = 0; i < 32; ++i) { if (src & 1) printf("1"); else printf("0"); src = src >> 1; } printf("\n"); } void binary_int64_printf(uint64_t src) { int i; for (i = 0; i < 64; ++i) { if (src & 1) printf("1"); else printf("0"); src = src >> 1; } printf("\n"); } /* void gemm_nn_custom_bin_mean(int M, int N, int K, float ALPHA_UNUSED, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { int *count_arr = xcalloc(M*N, sizeof(int)); int i, j, k; for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024] for (k = 0; k < K; ++k) { // l.size*l.size*l.c - one filter size [27 - 9216] char a_bit = get_bit(A, i*lda + k); for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056] char b_bit = get_bit(B, k*ldb + j); count_arr[i*ldc + j] += xnor(a_bit, b_bit); } } } for (i = 0; i < M; ++i) { float mean_val = mean_arr[i]; for (j = 0; j < N; ++j) { C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val; } } free(count_arr); } */ /* void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { int *count_arr = xcalloc(M*N, sizeof(int)); int i, j, k; for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024] for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056] for (k = 0; k < K; 
++k) { // l.size*l.size*l.c - one filter size [27 - 9216] char a_bit = get_bit(A, i*lda + k); char b_bit = get_bit(B, j*ldb + k); count_arr[i*ldc + j] += xnor(a_bit, b_bit); } } } for (i = 0; i < M; ++i) { float mean_val = mean_arr[i]; for (j = 0; j < N; ++j) { C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val; } } free(count_arr); } */ /* void gemm_nn_custom_bin_mean(int M, int N, int K, float ALPHA_UNUSED, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { int *count_arr = xcalloc(M*N, sizeof(int)); int i; #pragma omp parallel for for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024] int j, k, h; for (k = 0; k < K; ++k) { // l.size*l.size*l.c - one filter size [27 - 9216] const char a_bit = get_bit(A, i*lda + k); uint64_t a_bit64 = fill_bit_int64(a_bit); int k_ldb = k*ldb; for (j = 0; j < N; j += 64) { // out_h*out_w - one channel output size [169 - 173056] if ((N - j > 64) && (k_ldb % 8 == 0)) { uint64_t b_bit64 = *((uint64_t *)(B + (k_ldb + j) / 8)); uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64); //printf("\n %d \n",__builtin_popcountll(c_bit64)); // gcc printf("\n %d \n", __popcnt64(c_bit64)); // msvs int h; for (h = 0; h < 64; ++h) if ((c_bit64 >> h) & 1) count_arr[i*ldc + j + h] += 1; //binary_int64_printf(a_bit64); //binary_int64_printf(b_bit64); //binary_int64_printf(c_bit64); } else { for (; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056] char b_bit = get_bit(B, k_ldb + j); if (xnor(a_bit, b_bit)) count_arr[i*ldc + j] += 1; } } } } } if (mean_arr) { //int K_2 = K / 2; for (i = 0; i < M; ++i) { float mean_val = mean_arr[i]; //float mean_val2 = 2 * mean_val; for (j = 0; j < N; ++j) { C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val; //C[i*ldc + j] = (count_arr[i*ldc + j] - K_2) *mean_val2; } } } else { for (i = 0; i < M; ++i) { for (j = 0; j < N; ++j) { C[i*ldc + j] = count_arr[i*ldc + j] - K / 2; } } } free(count_arr); //getchar(); } */ /* void 
gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { int i; #pragma omp parallel for for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024] int j, k, h; float mean_val = mean_arr[i]; for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056] int count = 0; for (k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216] uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8)); uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8)); uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64); #ifdef WIN32 int tmp_count = __popcnt64(c_bit64); #else int tmp_count = __builtin_popcountll(c_bit64); #endif if (K - k < 64) tmp_count = tmp_count - (64 - (K - k)); // remove extra bits count += tmp_count; //binary_int64_printf(c_bit64); //printf(", count = %d \n\n", tmp_count); } C[i*ldc + j] = (2 * count - K) * mean_val; } } } */ //---------------------------- // is not used /* void transpose_32x32_bits_my(uint32_t *A, uint32_t *B, int lda, int ldb) { unsigned int x, y; for (y = 0; y < 32; ++y) { for (x = 0; x < 32; ++x) { if (A[y * lda] & ((uint32_t)1 << x)) B[x * ldb] |= (uint32_t)1 << y; } } } */ #ifndef GPU uint8_t reverse_8_bit(uint8_t a) { return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16; } uint32_t reverse_32_bit(uint32_t a) { // unsigned int __rbit(unsigned int val) // for ARM //__asm__("rbit %0, %1\n" : "=r"(output) : "r"(input)); return (reverse_8_bit(a >> 24) << 0) | (reverse_8_bit(a >> 16) << 8) | (reverse_8_bit(a >> 8) << 16) | (reverse_8_bit(a >> 0) << 24); } #define swap(a0, a1, j, m) t = (a0 ^ (a1 >>j)) & m; a0 = a0 ^ t; a1 = a1 ^ (t << j); void transpose32_optimized(uint32_t A[32]) { int j, k; unsigned m, t; //m = 0x0000FFFF; //for (j = 16; j != 0; j = j >> 1, m = m ^ (m << j)) { // for (k = 0; k < 32; k = (k + j + 1) & ~j) { // t = (A[k] ^ (A[k + j] >> j)) & m; // A[k] = A[k] ^ 
t; // A[k + j] = A[k + j] ^ (t << j); // } //} j = 16; m = 0x0000FFFF; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 8; m = 0x00ff00ff; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 4; m = 0x0f0f0f0f; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 2; m = 0x33333333; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 1; m = 0x55555555; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } // reverse Y for (j = 0; j < 16; ++j) { uint32_t tmp = A[j]; A[j] = reverse_32_bit(A[31 - j]); A[31 - j] = reverse_32_bit(tmp); } } void transpose_32x32_bits_reversed_diagonale(uint32_t *A, uint32_t *B, int m, int n) { unsigned A_tmp[32]; int i; #pragma unroll for (i = 0; i < 32; ++i) A_tmp[i] = A[i * m]; transpose32_optimized(A_tmp); #pragma unroll for (i = 0; i < 32; ++i) B[i*n] = A_tmp[i]; } void transpose_8x8_bits_my(unsigned char *A, unsigned char *B, int lda, int ldb) { unsigned x, y; for (y = 0; y < 8; ++y) { for (x = 0; x < 8; ++x) { if (A[y * lda] & (1 << x)) B[x * ldb] |= 1 << y; } } } unsigned char reverse_byte_1(char a) { return ((a & 0x1) << 7) | ((a & 0x2) << 5) | ((a & 0x4) << 3) | ((a & 0x8) << 1) | ((a & 0x10) >> 1) | ((a & 0x20) >> 3) | ((a & 0x40) >> 5) | ((a & 0x80) >> 7); } unsigned char reverse_byte(unsigned char a) { return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16; } static unsigned char lookup[16] = { 0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe, 0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf, }; unsigned char reverse_byte_3(unsigned char n) { // Reverse the top and bottom nibble then swap them. return (lookup[n & 0b1111] << 4) | lookup[n >> 4]; } void transpose8rS32_reversed_diagonale(unsigned char* A, unsigned char* B, int m, int n) { unsigned x, y, t; x = y = 0; // Load the array and pack it into x and y. 
//x = (A[0] << 24) | (A[m] << 16) | (A[2 * m] << 8) | A[3 * m]; //y = (A[4 * m] << 24) | (A[5 * m] << 16) | (A[6 * m] << 8) | A[7 * m]; t = (x ^ (x >> 7)) & 0x00AA00AA; x = x ^ t ^ (t << 7); t = (y ^ (y >> 7)) & 0x00AA00AA; y = y ^ t ^ (t << 7); t = (x ^ (x >> 14)) & 0x0000CCCC; x = x ^ t ^ (t << 14); t = (y ^ (y >> 14)) & 0x0000CCCC; y = y ^ t ^ (t << 14); t = (x & 0xF0F0F0F0) | ((y >> 4) & 0x0F0F0F0F); y = ((x << 4) & 0xF0F0F0F0) | (y & 0x0F0F0F0F); x = t; B[7 * n] = reverse_byte(x >> 24); B[6 * n] = reverse_byte(x >> 16); B[5 * n] = reverse_byte(x >> 8); B[4 * n] = reverse_byte(x); B[3 * n] = reverse_byte(y >> 24); B[2 * n] = reverse_byte(y >> 16); B[1 * n] = reverse_byte(y >> 8); B[0 * n] = reverse_byte(y); } /* // transpose by 8-bit void transpose_bin(char *A, char *B, const int n, const int m, const int lda, const int ldb, const int block_size) { //printf("\n n = %d, ldb = %d \t\t m = %d, lda = %d \n", n, ldb, m, lda); int i; #pragma omp parallel for for (i = 0; i < n; i += 8) { int j; for (j = 0; j < m; j += 8) { int a_index = i*lda + j; int b_index = j*ldb + i; //transpose_8x8_bits_my(&A[a_index/8], &B[b_index/8], lda/8, ldb/8); transpose8rS32_reversed_diagonale(&A[a_index / 8], &B[b_index / 8], lda / 8, ldb / 8); } for (; j < m; ++j) { if (get_bit(A, i*lda + j)) set_bit(B, j*ldb + i); } } } */ #endif // transpose by 32-bit void transpose_bin(uint32_t *A, uint32_t *B, const int n, const int m, const int lda, const int ldb, const int block_size) { //printf("\n n = %d (n mod 32 = %d), m = %d (m mod 32 = %d) \n", n, n % 32, m, m % 32); //printf("\n lda = %d (lda mod 32 = %d), ldb = %d (ldb mod 32 = %d) \n", lda, lda % 32, ldb, ldb % 32); int i; #pragma omp parallel for for (i = 0; i < n; i += 32) { int j; for (j = 0; j < m; j += 32) { int a_index = i*lda + j; int b_index = j*ldb + i; transpose_32x32_bits_reversed_diagonale(&A[a_index / 32], &B[b_index / 32], lda / 32, ldb / 32); //transpose_32x32_bits_my(&A[a_index/32], &B[b_index/32], lda/32, ldb/32); } for 
(; j < m; ++j) { if (get_bit((const unsigned char* const)A, i * lda + j)) set_bit((unsigned char* const)B, j * ldb + i); } } } static inline int popcnt_32(uint32_t val32) { #ifdef WIN32 // Windows MSVS int tmp_count = __popcnt(val32); #else // Linux GCC int tmp_count = __builtin_popcount(val32); #endif return tmp_count; } //---------------------------- #if (defined(__AVX__) && defined(__x86_64__)) || (defined(_WIN64) && !defined(__MINGW32__)) #if (defined(_WIN64) && !defined(__MINGW64__)) #include <intrin.h> #include <ammintrin.h> #include <immintrin.h> #include <smmintrin.h> #if defined(_MSC_VER) && _MSC_VER <= 1900 static inline __int32 _mm256_extract_epi64(__m256i a, const int index) { return a.m256i_i64[index]; } static inline __int32 _mm256_extract_epi32(__m256i a, const int index) { return a.m256i_i32[index]; } #endif static inline float _dn_castu32_f32(uint32_t a) { return *((float *)&a); } static inline float _mm256_extract_float32(__m256 a, const int index) { return a.m256_f32[index]; } #else // Linux GCC/Clang #include <x86intrin.h> #include <ammintrin.h> #include <immintrin.h> #include <smmintrin.h> #include <cpuid.h> static inline float _dn_castu32_f32(uint32_t a) { return *((float *)&a); } static inline float _mm256_extract_float32(__m256 a, const int index) { switch(index) { case 0: return _dn_castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 0)); case 1: return _dn_castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 1)); case 2: return _dn_castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 2)); case 3: return _dn_castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 3)); case 4: return _dn_castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 4)); case 5: return _dn_castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 5)); case 6: return _dn_castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 6)); case 7: return _dn_castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 7)); default: return 
_dn_castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 0)); } } void asm_cpuid(uint32_t* abcd, uint32_t eax) { uint32_t ebx = 0, edx = 0, ecx = 0; // EBX is saved to EDI and later restored __asm__("movl %%ebx, %%edi;" "cpuid;" "xchgl %%ebx, %%edi;" : "=D"(ebx), "+a"(eax), "+c"(ecx), "=d"(edx)); abcd[0] = eax; abcd[1] = ebx; abcd[2] = ecx; abcd[3] = edx; } #endif #ifdef _WIN32 // Windows #define cpuid(info, x) __cpuidex(info, x, 0) #else // GCC Intrinsics void cpuid(int info[4], int InfoType) { __cpuid_count(InfoType, 0, info[0], info[1], info[2], info[3]); } #endif // Misc. static int HW_MMX, HW_x64, HW_RDRAND, HW_BMI1, HW_BMI2, HW_ADX, HW_PREFETCHWT1; static int HW_ABM; // Advanced Bit Manipulation // SIMD: 128-bit static int HW_SSE, HW_SSE2, HW_SSE3, HW_SSSE3, HW_SSE41, HW_SSE42, HW_SSE4a, HW_AES, HW_SHA; // SIMD: 256-bit static int HW_AVX, HW_XOP, HW_FMA3, HW_FMA4, HW_AVX2; // SIMD: 512-bit static int HW_AVX512F; // AVX512 Foundation static int HW_AVX512CD; // AVX512 Conflict Detection static int HW_AVX512PF; // AVX512 Prefetch static int HW_AVX512ER; // AVX512 Exponential + Reciprocal static int HW_AVX512VL; // AVX512 Vector Length Extensions static int HW_AVX512BW; // AVX512 Byte + Word static int HW_AVX512DQ; // AVX512 Doubleword + Quadword static int HW_AVX512IFMA; // AVX512 Integer 52-bit Fused Multiply-Add static int HW_AVX512VBMI; // AVX512 Vector Byte Manipulation Instructions // https://stackoverflow.com/questions/6121792/how-to-check-if-a-cpu-supports-the-sse3-instruction-set void check_cpu_features(void) { int info[4]; cpuid(info, 0); int nIds = info[0]; cpuid(info, 0x80000000); unsigned nExIds = info[0]; // Detect Features if (nIds >= 0x00000001) { cpuid(info, 0x00000001); HW_MMX = (info[3] & ((uint32_t)1 << 23)) != 0; HW_SSE = (info[3] & ((uint32_t)1 << 25)) != 0; HW_SSE2 = (info[3] & ((uint32_t)1 << 26)) != 0; HW_SSE3 = (info[2] & ((uint32_t)1 << 0)) != 0; HW_SSSE3 = (info[2] & ((uint32_t)1 << 9)) != 0; HW_SSE41 = (info[2] & ((uint32_t)1 << 
19)) != 0; HW_SSE42 = (info[2] & ((uint32_t)1 << 20)) != 0; HW_AES = (info[2] & ((uint32_t)1 << 25)) != 0; HW_AVX = (info[2] & ((uint32_t)1 << 28)) != 0; HW_FMA3 = (info[2] & ((uint32_t)1 << 12)) != 0; HW_RDRAND = (info[2] & ((uint32_t)1 << 30)) != 0; } if (nIds >= 0x00000007) { cpuid(info, 0x00000007); HW_AVX2 = (info[1] & ((uint32_t)1 << 5)) != 0; HW_BMI1 = (info[1] & ((uint32_t)1 << 3)) != 0; HW_BMI2 = (info[1] & ((uint32_t)1 << 8)) != 0; HW_ADX = (info[1] & ((uint32_t)1 << 19)) != 0; HW_SHA = (info[1] & ((uint32_t)1 << 29)) != 0; HW_PREFETCHWT1 = (info[2] & ((uint32_t)1 << 0)) != 0; HW_AVX512F = (info[1] & ((uint32_t)1 << 16)) != 0; HW_AVX512CD = (info[1] & ((uint32_t)1 << 28)) != 0; HW_AVX512PF = (info[1] & ((uint32_t)1 << 26)) != 0; HW_AVX512ER = (info[1] & ((uint32_t)1 << 27)) != 0; HW_AVX512VL = (info[1] & ((uint32_t)1 << 31)) != 0; HW_AVX512BW = (info[1] & ((uint32_t)1 << 30)) != 0; HW_AVX512DQ = (info[1] & ((uint32_t)1 << 17)) != 0; HW_AVX512IFMA = (info[1] & ((uint32_t)1 << 21)) != 0; HW_AVX512VBMI = (info[2] & ((uint32_t)1 << 1)) != 0; } if (nExIds >= 0x80000001) { cpuid(info, 0x80000001); HW_x64 = (info[3] & ((uint32_t)1 << 29)) != 0; HW_ABM = (info[2] & ((uint32_t)1 << 5)) != 0; HW_SSE4a = (info[2] & ((uint32_t)1 << 6)) != 0; HW_FMA4 = (info[2] & ((uint32_t)1 << 16)) != 0; HW_XOP = (info[2] & ((uint32_t)1 << 11)) != 0; } } int is_avx() { static int result = -1; if (result == -1) { check_cpu_features(); result = HW_AVX; if (result == 1) printf(" Used AVX \n"); else printf(" Not used AVX \n"); } return result; } int is_fma_avx2() { static int result = -1; if (result == -1) { check_cpu_features(); result = HW_FMA3 && HW_AVX2; if (result == 1) printf(" Used FMA & AVX2 \n"); else printf(" Not used FMA & AVX2 \n"); } return result; } // https://software.intel.com/sites/landingpage/IntrinsicsGuide void gemm_nn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i, j, k; if (is_avx() == 1) { // AVX for (i = 0; i < 
M; ++i) { for (k = 0; k < K; ++k) { float A_PART = ALPHA*A[i*lda + k]; __m256 a256, b256, c256, result256; // AVX a256 = _mm256_set1_ps(A_PART); for (j = 0; j < N - 8; j += 8) { b256 = _mm256_loadu_ps(&B[k*ldb + j]); c256 = _mm256_loadu_ps(&C[i*ldc + j]); // FMA - Intel Haswell (2013), AMD Piledriver (2012) //result256 = _mm256_fmadd_ps(a256, b256, c256); result256 = _mm256_mul_ps(a256, b256); result256 = _mm256_add_ps(result256, c256); _mm256_storeu_ps(&C[i*ldc + j], result256); } int prev_end = (N % 8 == 0) ? (N - 8) : (N / 8) * 8; for (j = prev_end; j < N; ++j) C[i*ldc + j] += A_PART*B[k*ldb + j]; } } } else { for (i = 0; i < M; ++i) { for (k = 0; k < K; ++k) { PUT_IN_REGISTER float A_PART = ALPHA * A[i * lda + k]; for (j = 0; j < N; ++j) { C[i*ldc + j] += A_PART*B[k*ldb + j]; } /* // SSE __m128 a128, b128, c128, result128; // SSE a128 = _mm_set1_ps(A_PART); for (j = 0; j < N - 4; j += 4) { b128 = _mm_loadu_ps(&B[k*ldb + j]); c128 = _mm_loadu_ps(&C[i*ldc + j]); //result128 = _mm_fmadd_ps(a128, b128, c128); result128 = _mm_mul_ps(a128, b128); result128 = _mm_add_ps(result128, c128); _mm_storeu_ps(&C[i*ldc + j], result128); } int prev_end = (N % 4 == 0) ? 
(N - 4) : (N / 4) * 4; for (j = prev_end; j < N; ++j){ C[i*ldc + j] += A_PART*B[k*ldb + j]; } */ } } } } void gemm_nn_fast(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i; #pragma omp parallel for for (i = 0; i < (M / TILE_M)*TILE_M; i += TILE_M) { int j, k; int i_d, k_d; for (k = 0; k < (K / TILE_K)*TILE_K; k += TILE_K) { for (j = 0; j < (N / TILE_N)*TILE_N; j += TILE_N) { // L1 - 6 bits tag [11:6] - cache size 32 KB, conflict for each 4 KB // L2 - 9 bits tag [14:6] - cache size 256 KB, conflict for each 32 KB // L3 - 13 bits tag [18:6] - cache size 8 MB, conflict for each 512 KB __m256 result256; __m256 a256_0, b256_0; // AVX __m256 a256_1, b256_1; // AVX __m256 a256_2;// , b256_2; // AVX __m256 a256_3;// , b256_3; // AVX __m256 c256_0, c256_1, c256_2, c256_3; __m256 c256_4, c256_5, c256_6, c256_7; c256_0 = _mm256_loadu_ps(&C[(0 + i)*ldc + (0 + j)]); c256_1 = _mm256_loadu_ps(&C[(1 + i)*ldc + (0 + j)]); c256_2 = _mm256_loadu_ps(&C[(0 + i)*ldc + (8 + j)]); c256_3 = _mm256_loadu_ps(&C[(1 + i)*ldc + (8 + j)]); c256_4 = _mm256_loadu_ps(&C[(2 + i)*ldc + (0 + j)]); c256_5 = _mm256_loadu_ps(&C[(3 + i)*ldc + (0 + j)]); c256_6 = _mm256_loadu_ps(&C[(2 + i)*ldc + (8 + j)]); c256_7 = _mm256_loadu_ps(&C[(3 + i)*ldc + (8 + j)]); for (k_d = 0; k_d < (TILE_K); ++k_d) { a256_0 = _mm256_set1_ps(ALPHA*A[(0 + i)*lda + (k_d + k)]); a256_1 = _mm256_set1_ps(ALPHA*A[(1 + i)*lda + (k_d + k)]); a256_2 = _mm256_set1_ps(ALPHA*A[(2 + i)*lda + (k_d + k)]); a256_3 = _mm256_set1_ps(ALPHA*A[(3 + i)*lda + (k_d + k)]); b256_0 = _mm256_loadu_ps(&B[(k_d + k)*ldb + (0 + j)]); b256_1 = _mm256_loadu_ps(&B[(k_d + k)*ldb + (8 + j)]); // FMA - Intel Haswell (2013), AMD Piledriver (2012) //c256_0 = _mm256_fmadd_ps(a256_0, b256_0, c256_0); //c256_1 = _mm256_fmadd_ps(a256_1, b256_0, c256_1); //c256_2 = _mm256_fmadd_ps(a256_0, b256_1, c256_2); //c256_3 = _mm256_fmadd_ps(a256_1, b256_1, c256_3); //c256_4 = _mm256_fmadd_ps(a256_2, b256_0, c256_4); //c256_5 = 
_mm256_fmadd_ps(a256_3, b256_0, c256_5); //c256_6 = _mm256_fmadd_ps(a256_2, b256_1, c256_6); //c256_7 = _mm256_fmadd_ps(a256_3, b256_1, c256_7); result256 = _mm256_mul_ps(a256_0, b256_0); c256_0 = _mm256_add_ps(result256, c256_0); result256 = _mm256_mul_ps(a256_1, b256_0); c256_1 = _mm256_add_ps(result256, c256_1); result256 = _mm256_mul_ps(a256_0, b256_1); c256_2 = _mm256_add_ps(result256, c256_2); result256 = _mm256_mul_ps(a256_1, b256_1); c256_3 = _mm256_add_ps(result256, c256_3); result256 = _mm256_mul_ps(a256_2, b256_0); c256_4 = _mm256_add_ps(result256, c256_4); result256 = _mm256_mul_ps(a256_3, b256_0); c256_5 = _mm256_add_ps(result256, c256_5); result256 = _mm256_mul_ps(a256_2, b256_1); c256_6 = _mm256_add_ps(result256, c256_6); result256 = _mm256_mul_ps(a256_3, b256_1); c256_7 = _mm256_add_ps(result256, c256_7); } _mm256_storeu_ps(&C[(0 + i)*ldc + (0 + j)], c256_0); _mm256_storeu_ps(&C[(1 + i)*ldc + (0 + j)], c256_1); _mm256_storeu_ps(&C[(0 + i)*ldc + (8 + j)], c256_2); _mm256_storeu_ps(&C[(1 + i)*ldc + (8 + j)], c256_3); _mm256_storeu_ps(&C[(2 + i)*ldc + (0 + j)], c256_4); _mm256_storeu_ps(&C[(3 + i)*ldc + (0 + j)], c256_5); _mm256_storeu_ps(&C[(2 + i)*ldc + (8 + j)], c256_6); _mm256_storeu_ps(&C[(3 + i)*ldc + (8 + j)], c256_7); } for (j = (N / TILE_N)*TILE_N; j < N; ++j) { for (i_d = i; i_d < (i + TILE_M); ++i_d) { for (k_d = k; k_d < (k + TILE_K); ++k_d) { PUT_IN_REGISTER float A_PART = ALPHA*A[i_d*lda + k_d]; C[i_d*ldc + j] += A_PART*B[k_d*ldb + j]; } } } } for (k = (K / TILE_K)*TILE_K; k < K; ++k) { for (i_d = i; i_d < (i + TILE_M); ++i_d) { PUT_IN_REGISTER float A_PART = ALPHA*A[i_d*lda + k]; for (j = 0; j < N; ++j) { C[i_d*ldc + j] += A_PART*B[k*ldb + j]; } } } } for (i = (M / TILE_M)*TILE_M; i < M; ++i) { int j, k; for (k = 0; k < K; ++k) { PUT_IN_REGISTER float A_PART = ALPHA*A[i*lda + k]; for (j = 0; j < N; ++j) { C[i*ldc + j] += A_PART*B[k*ldb + j]; } } } } void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA, uint32_t *A, int lda, 
uint32_t *B, int ldb, float *C, int ldc, float *mean_arr) { int i; #pragma omp parallel for for (i = 0; i < M; ++i) { // l.n int j, s; float mean_val = mean_arr[i]; //printf(" l.mean_arr[i] = %d \n ", l.mean_arr[i]); for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c) { PUT_IN_REGISTER uint32_t A_PART = A[i*lda + s]; __m256i a256 = _mm256_set1_epi32(A_PART); for (j = 0; j < N - 8; j += 8) { __m256i b256 = *((__m256i*)&B[s*ldb + j]); __m256i xor256 = _mm256_xor_si256(a256, b256); // xnor = xor(a,b) __m256i all_1 = _mm256_set1_epi8((char)255); __m256i xnor256 = _mm256_andnot_si256(xor256, all_1); // xnor = not(xor(a,b)) // waiting for - CPUID Flags: AVX512VPOPCNTDQ: __m512i _mm512_popcnt_epi32(__m512i a) __m256 count = _mm256_setr_ps( popcnt_32(_mm256_extract_epi32(xnor256, 0)), popcnt_32(_mm256_extract_epi32(xnor256, 1)), popcnt_32(_mm256_extract_epi32(xnor256, 2)), popcnt_32(_mm256_extract_epi32(xnor256, 3)), popcnt_32(_mm256_extract_epi32(xnor256, 4)), popcnt_32(_mm256_extract_epi32(xnor256, 5)), popcnt_32(_mm256_extract_epi32(xnor256, 6)), popcnt_32(_mm256_extract_epi32(xnor256, 7))); __m256 val2 = _mm256_set1_ps(2); count = _mm256_mul_ps(count, val2); // count * 2 __m256 val32 = _mm256_set1_ps(32); count = _mm256_sub_ps(count, val32); // count - 32 __m256 mean256 = _mm256_set1_ps(mean_val); count = _mm256_mul_ps(count, mean256); // count * mean_val __m256 c256 = *((__m256*)&C[i*ldc + j]); count = _mm256_add_ps(count, c256); // c = c + count *((__m256*)&C[i*ldc + j]) = count; } for (; j < N; ++j) // out_h*out_w; { PUT_IN_REGISTER uint32_t B_PART = B[s*ldb + j]; uint32_t xnor_result = ~(A_PART ^ B_PART); int32_t count = popcnt_32(xnor_result); // must be Signed int C[i*ldc + j] += (2 * count - 32) * mean_val; } } } } void convolution_2d_old(int w, int h, int ksize, int n, int c, int pad, int stride, float *weights, float *input, float *output) { //const int out_h = (h + 2 * pad - ksize) / stride + 1; // output_height=input_height for stride=1 
and pad=1 //const int out_w = (w + 2 * pad - ksize) / stride + 1; // output_width=input_width for stride=1 and pad=1 int fil; // filter index #pragma omp parallel for // "omp parallel for" - automatic parallelization of loop by using OpenMP for (fil = 0; fil < n; ++fil) { //int i, f, j; int chan, y, x, f_y, f_x; // channel index for (chan = 0; chan < c; ++chan) // input - y for (y = 0; y < h; ++y) // input - x for (x = 0; x < w; ++x) { int const output_index = fil*w*h + y*w + x; int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize; int const input_pre_index = chan*w*h; float sum = 0; // filter - y for (f_y = 0; f_y < ksize; ++f_y) { int input_y = y + f_y - pad; // filter - x for (f_x = 0; f_x < ksize; ++f_x) { int input_x = x + f_x - pad; if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue; int input_index = input_pre_index + input_y*w + input_x; int weights_index = weights_pre_index + f_y*ksize + f_x; sum += input[input_index] * weights[weights_index]; } } // l.output[filters][width][height] += // state.input[channels][width][height] * // l.weights[filters][channels][filter_width][filter_height]; output[output_index] += sum; } } } void convolution_2d(int w, int h, int ksize, int n, int c, int pad, int stride, float *weights, float *input, float *output, float *mean) { //const int out_h = (h + 2 * pad - ksize) / stride + 1; // output_height=input_height for stride=1 and pad=1 //const int out_w = (w + 2 * pad - ksize) / stride + 1; // output_width=input_width for stride=1 and pad=1 int i; #if defined(_OPENMP) static int max_num_threads = 0; if (max_num_threads == 0) { max_num_threads = omp_get_max_threads(); //omp_set_num_threads( max_num_threads / 2); } #endif //convolution_2d_old(w, h, ksize, n, c, pad, stride, weights, input, output); __m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000); for (i = 0; i < ksize*ksize*n*c; i+=8) { 
*((__m256*)&weights[i]) = _mm256_and_ps(*((__m256*)&weights[i]), _mm256_castsi256_ps(all256_sing1)); } //for (i = 0; i < w*h*c; i += 8) { //(*(__m256*)&input[i]) = _mm256_and_ps(*((__m256*)&input[i]), _mm256_castsi256_ps(all256_sing1)); //} //__m256i all256_last_zero = _mm256_set1_epi32(0xFFFFFFFF); //all256_last_zero.m256i_i32[7] = 0; __m256i all256_last_zero = _mm256_set_epi32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0); __m256i idx256 = _mm256_set_epi32(0, 7, 6, 5, 4, 3, 2, 1); //__m256 all256_sing1 = _mm256_set1_ps(0x80000000); __m256 all256_one = _mm256_set1_ps(1); __m256i all256i_one = _mm256_set1_epi32(1); ///__m256i src256 = _mm256_loadu_si256((__m256i *)(&src[i])); ///__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats int fil; // filter index #pragma omp parallel for // "omp parallel for" - automatic parallelization of loop by using OpenMP for (fil = 0; fil < n; ++fil) { int chan, y, x, f_y, f_x; float cur_mean = fabs(mean[fil]); __m256 mean256 = _mm256_set1_ps(cur_mean); // channel index //for (chan = 0; chan < c; ++chan) // input - y for (y = 0; y < h; ++y) // input - x for (x = 0; x < w-8; x+=8) { int const output_index = fil*w*h + y*w + x; float sum = 0; __m256 sum256 = _mm256_set1_ps(0); for (chan = 0; chan < c; ++chan) { int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize; int const input_pre_index = chan*w*h; // filter - y for (f_y = 0; f_y < ksize; ++f_y) { int input_y = y + f_y - pad; //__m256 in = *((__m256*)&input[input_pre_index + input_y*w]); if (input_y < 0 || input_y >= h) continue; //__m256 in = _mm256_loadu_ps(&input[input_pre_index + input_y*w + x - pad]); // filter - x for (f_x = 0; f_x < ksize; ++f_x) { int input_x = x + f_x - pad; //if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue; int input_index = input_pre_index + input_y*w + input_x; int weights_index = weights_pre_index + f_y*ksize + f_x; //if (input_y < 
0 || input_y >= h) continue; //sum += input[input_index] * weights[weights_index]; __m256 in = *((__m256*)&input[input_index]); __m256 w = _mm256_set1_ps(weights[weights_index]); //__m256 w_sign = _mm256_and_ps(w, _mm256_castsi256_ps(all256_sing1)); // check sign in 8 x 32-bit floats __m256 xor256 = _mm256_xor_ps(w, in); //printf("\n xor256_1 = %f, xor256_2 = %f \n", xor256.m256_f32[0], xor256.m256_f32[1]); //printf("\n in = %f, w = %f, xor256 = %f \n", in.m256_f32[0], w_sign.m256_f32[0], xor256.m256_f32[0]); //__m256 pn1 = _mm256_and_ps(_mm256_castsi256_ps(all256i_one), xor256); //sum256 = xor256; sum256 = _mm256_add_ps(xor256, sum256); //printf("\n --- \n"); //printf("\n 0 = %f, 1 = %f, 2 = %f, 3 = %f, 4 = %f, 5 = %f, 6 = %f, 7 = %f \n", in.m256_f32[0], in.m256_f32[1], in.m256_f32[2], in.m256_f32[3], in.m256_f32[4], in.m256_f32[5], in.m256_f32[6], in.m256_f32[7]); if (f_x < ksize-1) { //in = _mm256_permutevar8x32_ps(in, idx256); //in = _mm256_and_ps(in, _mm256_castsi256_ps(all256_last_zero)); } } } } // l.output[filters][width][height] += // state.input[channels][width][height] * // l.weights[filters][channels][filter_width][filter_height]; //output[output_index] += sum; sum256 = _mm256_mul_ps(sum256, mean256); //printf("\n cur_mean = %f, sum256 = %f, sum256 = %f, in = %f \n", // cur_mean, sum256.m256_f32[0], sum256.m256_f32[1], input[input_pre_index]); //__m256 out = *((__m256*)&output[output_index]); //out = _mm256_add_ps(out, sum256); //(*(__m256*)&output[output_index]) = out; *((__m256*)&output[output_index]) = sum256; //_mm256_storeu_ps(&C[i*ldc + j], result256); } } } // http://graphics.stanford.edu/~seander/bithacks.html // https://stackoverflow.com/questions/17354971/fast-counting-the-number-of-set-bits-in-m128i-register // https://arxiv.org/pdf/1611.07612.pdf static inline int popcnt128(__m128i n) { const __m128i n_hi = _mm_unpackhi_epi64(n, n); #if defined(_MSC_VER) return __popcnt64(_mm_cvtsi128_si64(n)) + __popcnt64(_mm_cvtsi128_si64(n_hi)); #elif 
defined(__APPLE__) && defined(__clang__) return _mm_popcnt_u64(_mm_cvtsi128_si64(n)) + _mm_popcnt_u64(_mm_cvtsi128_si64(n_hi)); #else return __popcntq(_mm_cvtsi128_si64(n)) + __popcntq(_mm_cvtsi128_si64(n_hi)); #endif } static inline int popcnt256(__m256i n) { return popcnt128(_mm256_extractf128_si256(n, 0)) + popcnt128(_mm256_extractf128_si256(n, 1)); } static inline __m256i count256(__m256i v) { __m256i lookup = _mm256_setr_epi8(0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4); __m256i low_mask = _mm256_set1_epi8(0x0f); __m256i lo = _mm256_and_si256(v, low_mask); __m256i hi = _mm256_and_si256(_mm256_srli_epi32(v, 4), low_mask); __m256i popcnt1 = _mm256_shuffle_epi8(lookup, lo); __m256i popcnt2 = _mm256_shuffle_epi8(lookup, hi); __m256i total = _mm256_add_epi8(popcnt1, popcnt2); return _mm256_sad_epu8(total, _mm256_setzero_si256()); } static inline int popcnt256_custom(__m256i n) { __m256i val = count256(n); //return val.m256i_i64[0] + //val.m256i_i64[1] + //val.m256i_i64[2] + //val.m256i_i64[3]; return _mm256_extract_epi64(val, 0) + _mm256_extract_epi64(val, 1) + _mm256_extract_epi64(val, 2) + _mm256_extract_epi64(val, 3); } static inline void xnor_avx2_popcnt(__m256i a_bit256, __m256i b_bit256, __m256i *count_sum) { __m256i c_bit256 = _mm256_set1_epi8((char)255); __m256i xor256 = _mm256_xor_si256(a_bit256, b_bit256); // xnor = not(xor(a,b)) c_bit256 = _mm256_andnot_si256(xor256, c_bit256); // can be optimized - we can do other NOT for wegihts once and do not do this NOT *count_sum = _mm256_add_epi64(count256(c_bit256), *count_sum); // 1st part - popcnt Mula's algorithm } // 2nd part - popcnt Mula's algorithm static inline int get_count_mula(__m256i count_sum) { return _mm256_extract_epi64(count_sum, 0) + _mm256_extract_epi64(count_sum, 1) + _mm256_extract_epi64(count_sum, 2) + _mm256_extract_epi64(count_sum, 3); } // 5x times faster than gemm()-float32 // further optimizations: do mean-mult only for the last layer 
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED, unsigned char *A, int lda, unsigned char *B, int ldb, float *C, int ldc, float *mean_arr) { int i; #if defined(_OPENMP) static int max_num_threads = 0; if (max_num_threads == 0) { max_num_threads = omp_get_max_threads(); //omp_set_num_threads(max_num_threads / 2); } #endif //#pragma omp parallel for //for (i = 0; i < M; ++i) #pragma omp parallel for for (i = 0; i < (M/2)*2; i += 2) { // l.n - filters [16 - 55 - 1024] float mean_val_0 = mean_arr[i + 0]; float mean_val_1 = mean_arr[i + 1]; int j, k; //__m256i all_1 = _mm256_set1_epi8(255); //for (j = 0; j < N; ++j) for (j = 0; j < (N/2)*2; j += 2) { // out_h*out_w - one channel output size [169 - 173056] //int count = 0; const int bit_step = 256; __m256i count_sum_0 = _mm256_set1_epi8(0); __m256i count_sum_1 = _mm256_set1_epi8(0); __m256i count_sum_2 = _mm256_set1_epi8(0); __m256i count_sum_3 = _mm256_set1_epi8(0); for (k = 0; k < K; k += bit_step) { // l.size*l.size*l.c - one filter size [27 - 9216] __m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + 0)*lda + k) / 8)); __m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8)); __m256i a_bit256_1 = _mm256_loadu_si256((__m256i *)(A + ((i + 1)*lda + k) / 8)); __m256i b_bit256_1 = _mm256_loadu_si256((__m256i *)(B + ((j + 1)*ldb + k) / 8)); xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum_0); xnor_avx2_popcnt(a_bit256_0, b_bit256_1, &count_sum_1); xnor_avx2_popcnt(a_bit256_1, b_bit256_0, &count_sum_2); xnor_avx2_popcnt(a_bit256_1, b_bit256_1, &count_sum_3); //count += popcnt256(c_bit256); //binary_int64_printf(c_bit64); //printf(", count = %d \n\n", tmp_count); } int count_0 = get_count_mula(count_sum_0); int count_1 = get_count_mula(count_sum_1); int count_2 = get_count_mula(count_sum_2); int count_3 = get_count_mula(count_sum_3); const int f1 = (K % bit_step == 0) ? 
0 : (bit_step - (K % bit_step)); count_0 = count_0 - f1; // remove extra bits (from empty space for align only) count_1 = count_1 - f1; count_2 = count_2 - f1; count_3 = count_3 - f1; C[i*ldc + (j + 0)] = (2 * count_0 - K) * mean_val_0; C[i*ldc + (j + 1)] = (2 * count_1 - K) * mean_val_0; C[(i + 1)*ldc + (j + 0)] = (2 * count_2 - K) * mean_val_1; C[(i + 1)*ldc + (j + 1)] = (2 * count_3 - K) * mean_val_1; } int i_d; for (i_d = 0; i_d < 2; ++i_d) { float mean_val = mean_arr[i + i_d]; for (j = (N / 2) * 2; j < N; j += 1) { // out_h*out_w - one channel output size [169 - 173056] const int bit_step = 256; __m256i count_sum = _mm256_set1_epi8(0); for (k = 0; k < K; k += bit_step) { // l.size*l.size*l.c - one filter size [27 - 9216] __m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + i_d + 0)*lda + k) / 8)); __m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8)); xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum); } int count = get_count_mula(count_sum); const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step)); count = count - f1; // remove extra bits (from empty space for align only) C[(i + i_d)*ldc + j] = (2 * count - K) * mean_val; } } } for (i = (M / 2) * 2; i < M; i += 1) { float mean_val = mean_arr[i]; int j, k; for (j = 0; j < N; j += 1) { // out_h*out_w - one channel output size [169 - 173056] const int bit_step = 256; __m256i count_sum = _mm256_set1_epi8(0); for (k = 0; k < K; k += bit_step) { // l.size*l.size*l.c - one filter size [27 - 9216] __m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + 0)*lda + k) / 8)); __m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8)); xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum); } int count = get_count_mula(count_sum); const int f1 = (K % bit_step == 0) ? 
0 : (bit_step - (K % bit_step)); count = count - f1; // remove extra bits (from empty space for align only) C[i*ldc + j] = (2 * count - K) * mean_val; } } } //From Berkeley Vision's Caffe! //https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu_custom_transpose(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col, int ldb_align) { const int height_col = (height + 2 * pad - ksize) / stride + 1; const int width_col = (width + 2 * pad - ksize) / stride + 1; const int channels_col = channels * ksize * ksize; int c; // optimized version if (height_col == height && width_col == width && stride == 1 && pad == 1) { #pragma omp parallel for for (c = 0; c < channels_col; ++c) { int h, w; int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = pad; h < height_col - pad; ++h) { for (w = pad; w < width_col - pad - 4; w+=8) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; //int col_index = (c * height_col + h) * width_col + w; int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)])); data_col[col_index + ldb_align * 0] = _mm256_extract_float32(src256, 0);// src256.m256_f32[0]; data_col[col_index + ldb_align * 1] = _mm256_extract_float32(src256, 1);// src256.m256_f32[1]; data_col[col_index + ldb_align * 2] = _mm256_extract_float32(src256, 2);// src256.m256_f32[2]; data_col[col_index + ldb_align * 3] = _mm256_extract_float32(src256, 3);// src256.m256_f32[3]; data_col[col_index + ldb_align * 4] = _mm256_extract_float32(src256, 4);// src256.m256_f32[4]; data_col[col_index + ldb_align * 5] = _mm256_extract_float32(src256, 5);// src256.m256_f32[5]; data_col[col_index + ldb_align * 6] = _mm256_extract_float32(src256, 6);// src256.m256_f32[6]; data_col[col_index + ldb_align * 7] = 
_mm256_extract_float32(src256, 7);// src256.m256_f32[7]; //_mm256_storeu_ps(&data_col[col_index], src256); } for (; w < width_col - pad; ++w) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; } } { w = 0; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { w = width_col - 1; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { h = 0; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { h = height_col - 1; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } } } else { #pragma omp parallel for for (c = 0; c < channels_col; ++c) { int h, w; int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = 0; h < height_col; ++h) { for (w = 0; w < width_col; ++w) { int im_row = h_offset + h * stride; int im_col = w_offset + w * stride; int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } } } } 
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// im2col for GEMM-based convolution: expands every ksize*ksize patch of
// data_im (channels x height x width) into a row of data_col.
// Fast path: when the output keeps the input's spatial size (stride == 1,
// pad == 1) and FMA/AVX2 is available at runtime, interior pixels are copied
// 8 floats at a time; border rows/columns go through the bounds-checked
// im2col_get_pixel().  Otherwise falls back to the generic im2col_cpu().
void im2col_cpu_custom(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col)
{
    int c;
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;

    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
    {
        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;           // x-offset of this tap inside the kernel
            int h_offset = (c / ksize) % ksize; // y-offset of this tap inside the kernel
            int c_im = c / ksize / ksize;       // source image channel
            // Interior pixels: no bounds checks needed, vector copy 8 at a time.
            for (h = pad; h < height_col-pad; ++h) {
                for (w = pad; w < width_col-pad-8; w += 8) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    int col_index = (c * height_col + h) * width_col + w;

                    //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                    __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
                    _mm256_storeu_ps(&data_col[col_index], src256);
                }
                // Scalar tail of the interior row.
                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                }
            }

            // Left column (w == 0): bounds-checked gather.
            {
                w = 0;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }

            // Right column (w == width_col-1).
            {
                w = width_col-1;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }

            // Top row (h == 0).
            {
                h = 0;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }

            // Bottom row (h == height_col-1).
            {
                h = height_col-1;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
        }
    }
    else {
        //printf("\n Error: is no non-optimized version \n");
        im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
    }
}


//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// Same expansion as im2col_cpu_custom, but each channel-row of the output is
// laid out with stride new_ldb (= bit_align) elements instead of
// height_col*width_col, i.e. col_index = c*new_ldb + h*width_col + w.
// The padding exists so a later bit-transpose can work on aligned rows.
// NOTE(review): there is NO scalar fallback here — if the fast-path
// preconditions fail, this only prints an error and leaves data_col untouched.
void im2col_cpu_custom_align(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col, int bit_align)
{
    int c;
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;

    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
    {
        int new_ldb = bit_align;    // aligned row stride of the output matrix

        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;
            int h_offset = (c / ksize) % ksize;
            int c_im = c / ksize / ksize;
            // Interior pixels, 8-float vector copies.
            for (h = pad; h < height_col - pad; ++h) {
                for (w = pad; w < width_col - pad - 8; w += 8) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;   // transposed & aligned

                    //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                    __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
                    _mm256_storeu_ps(&data_col[col_index], src256);
                }
                // Scalar tail of the interior row.
                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;   // transposed & aligned
                    data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                }
            }

            // Left column (w == 0): bounds-checked gather.
            {
                w = 0;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;   // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }

            // Right column (w == width_col-1).
            {
                w = width_col - 1;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;   // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }

            // Top row (h == 0).
            {
                h = 0;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;   // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }

            // Bottom row (h == height_col-1).
            {
                h = height_col - 1;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;   // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
        }
    }
    else {
        printf("\n Error: is no non-optimized version \n");
        //im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin
        // float_to_bit(b, t_input, src_size);
        // transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
    }
}


//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// Binarizing im2col: same patch layout as im2col_cpu_custom_align, but instead
// of copying floats it stores ONE BIT per input value into data_col, which is
// treated as a packed bit buffer (bit set iff value > 0).  Rows are laid out
// with stride bit_align bits.  AVX fast path only; the else-branch just
// reports an error and writes nothing.
// NOTE(review): the 16-bit `|=` in the vector loop is a non-atomic
// read-modify-write; correctness under `omp parallel for` relies on distinct
// c values touching disjoint bytes — presumably bit_align is a multiple of 8;
// confirm with callers.
void im2col_cpu_custom_bin(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col, int bit_align)
{
    int c;
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;

    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
    {
        __m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
        __m256 float_zero256 = _mm256_set1_ps(0.00);

        int new_ldb = bit_align;    // row stride of the bit matrix, in bits

        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;
            int h_offset = (c / ksize) % ksize;
            int c_im = c / ksize / ksize;
            // Interior pixels: compare 8 floats against 0 at once; the 8-bit
            // sign mask is OR-ed into the bit buffer at col_index.
            for (h = pad; h < height_col - pad; ++h) {
                for (w = pad; w < width_col - pad - 8; w += 8) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;   // transposed & aligned

                    //__m256i src256 = _mm256_loadu_si256((__m256i *)(&data_im[im_col + width*(im_row + height*c_im)]));
                    //__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats
                    //uint16_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1
                    ////mask = ~mask;   // inverse mask,  (val >= 0) ? 1 : 0

                    __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
                    __m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS);
                    uint16_t mask = _mm256_movemask_ps(result256); // (val > 0) ? 0 : 1

                    // 16-bit write so the 8 mask bits can straddle a byte boundary.
                    uint16_t* dst_ptr = (uint16_t*)&((uint8_t*)data_col)[col_index / 8];
                    *dst_ptr |= (mask << (col_index % 8));
                }
                // Scalar tail of the interior row.
                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;   // transposed & aligned

                    //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                    float val = data_im[im_col + width*(im_row + height*c_im)];
                    if (val > 0) set_bit((unsigned char* const)data_col, col_index);
                }
            }

            // Left column (w == 0): bounds-checked gather, bit per value.
            {
                w = 0;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;   // transposed & aligned

                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char* const)data_col, col_index);
                }
            }

            // Right column (w == width_col-1).
            {
                w = width_col - 1;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;   // transposed & aligned

                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char* const)data_col, col_index);
                }
            }

            // Top row (h == 0).
            {
                h = 0;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;   // transposed & aligned

                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char* const)data_col, col_index);
                }
            }

            // Bottom row (h == height_col-1).
            {
                h = height_col - 1;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;   // transposed & aligned

                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char* const)data_col, col_index);
                }
            }
        }
    }
    else {
        printf("\n Error: is no non-optimized version \n");
        //im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin
        // float_to_bit(b, t_input, src_size);
        // transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
    }
}


// Applies activation `a` in place to x[0..n-1].  LINEAR is a no-op; LEAKY has
// an AVX-vectorized path (8 lanes at a time, scalar remainder); every other
// activation goes through the generic activate().
// Vector LEAKY: blendv selects 0.1*x for lanes whose IEEE sign bit is set,
// x otherwise (so the test is on the sign bit, not a compare against zero).
void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a)
{
    int i = 0;
    if (a == LINEAR)
    {}
    else if (a == LEAKY)
    {
        if (is_fma_avx2()) {
            __m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
            __m256 all256_01 = _mm256_set1_ps(0.1F);

            for (i = 0; i < n - 8; i += 8) {
                //x[i] = (x[i]>0) ? x[i] : .1*x[i];
                __m256 src256 = _mm256_loadu_ps(&x[i]);
                __m256 mult256 = _mm256_mul_ps((src256), all256_01); // mult * 0.1

                __m256i sign256 = _mm256_and_si256(_mm256_castps_si256(src256), all256_sing1); // check sign in 8 x 32-bit floats

                __m256 result256 = _mm256_blendv_ps(src256, mult256, _mm256_castsi256_ps(sign256)); // (sign>0) ? src : mult;
                _mm256_storeu_ps(&x[i], result256);
            }
        }
        // Scalar remainder (also the whole loop when AVX2/FMA is unavailable).
        for (; i < n; ++i) {
            x[i] = (x[i]>0) ? x[i] : .1*x[i];
        }
    }
    else {
        for (i = 0; i < n; ++i) {
            x[i] = activate(x[i], a);
        }
    }
}

// Packs `size` floats into a bit array: bit i is set iff src[i] > 0.
// dst is zeroed first, then each 8-float group is compared against zero and
// its 8-bit sign mask stored into one byte of dst.
// NOTE(review): the loop always advances by 8, so if size is not a multiple
// of 8 the final iteration reads past the end of src — confirm callers pad.
void float_to_bit(float *src, unsigned char *dst, size_t size)
{
    size_t dst_size = size / 8 + 1;
    memset(dst, 0, dst_size);

    size_t i;
    //__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
    __m256 float_zero256 = _mm256_set1_ps(0.0);

    for (i = 0; i < size; i+=8)
    {
        //__m256i src256 = _mm256_loadu_si256((__m256i *)(&src[i]));
        //__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats
        //uint32_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1
        ////mask = ~mask;   // inverse mask,  (val >= 0) ? 1 : 0

        __m256 src256 = _mm256_loadu_ps((float *)(&src[i]));
        __m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS);
        uint32_t mask = _mm256_movemask_ps(result256); // (val > 0) ? 0 : 1

        dst[i / 8] = mask;
    }
}

// Transposes one 4x4 float tile: reads 4 rows of A (row stride lda) and
// writes the transposed tile into B (row stride ldb).
static inline void transpose4x4_SSE(float *A, float *B, const int lda, const int ldb)
{
    __m128 row1 = _mm_loadu_ps(&A[0 * lda]);
    __m128 row2 = _mm_loadu_ps(&A[1 * lda]);
    __m128 row3 = _mm_loadu_ps(&A[2 * lda]);
    __m128 row4 = _mm_loadu_ps(&A[3 * lda]);
    _MM_TRANSPOSE4_PS(row1, row2, row3, row4);
    _mm_storeu_ps(&B[0 * ldb], row1);
    _mm_storeu_ps(&B[1 * ldb], row2);
    _mm_storeu_ps(&B[2 * ldb], row3);
    _mm_storeu_ps(&B[3 * ldb], row4);
}

// Blocked out-of-place transpose of an n x m matrix A (stride lda) into B
// (stride ldb).  Full interior blocks use the SSE 4x4 tile kernel (block_size
// is assumed divisible by 4 there — TODO confirm); partial edge blocks fall
// back to scalar element copies.
void transpose_block_SSE4x4(float *A, float *B, const int n, const int m,
    const int lda, const int ldb, const int block_size)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < n; i += block_size) {
        int j, i2, j2;
        //int max_i2 = (i + block_size < n) ? (i + block_size) : n;
        if (i + block_size < n) {   // full block-row
            int max_i2 = i + block_size;
            for (j = 0; j < m; j += block_size) {
                //int max_j2 = (j + block_size < m) ? (j + block_size) : m;
                if (j + block_size < m) {   // full block: SSE 4x4 tiles
                    int max_j2 = j + block_size;
                    for (i2 = i; i2 < max_i2; i2 += 4) {
                        for (j2 = j; j2 < max_j2; j2 += 4) {
                            transpose4x4_SSE(&A[i2*lda + j2], &B[j2*ldb + i2], lda, ldb);
                        }
                    }
                }
                else {  // ragged right edge: scalar copy
                    for (i2 = i; i2 < max_i2; ++i2) {
                        for (j2 = j; j2 < m; ++j2) {
                            B[j2*ldb + i2] = A[i2*lda + j2];
                        }
                    }
                }
            }
        }
        else {  // ragged bottom edge: scalar copy
            for (i2 = i; i2 < n; ++i2) {
                for (j2 = 0; j2 < m; ++j2) {
                    B[j2*ldb + i2] = A[i2*lda + j2];
                }
            }
        }
    }
}

// Max-pooling forward pass over src (laid out [batch][c][h][w]) into dst
// ([batch][c][out_h][out_w]).  Two vectorized fast paths (stride==1, and
// size==2/stride==2) compute 8 resp. 4 outputs at once; both skip max-index
// tracking, so `indexes` is only filled by the scalar loop that handles the
// remaining columns (and everything when no fast path applies).
void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c,
    int pad, int stride, int batch)
{
    const int w_offset = -pad / 2;
    const int h_offset = -pad / 2;
    int b, k;

    for (b = 0; b < batch; ++b) {
        #pragma omp parallel for
        for (k = 0; k < c; ++k) {
            int i, j, m, n;
            for (i = 0; i < out_h; ++i) {
                //for (j = 0; j < out_w; ++j) {
                j = 0;

                if(stride == 1 && is_avx() == 1) {
                    // stride 1: 8 adjacent outputs share shifted windows.
                    for (j = 0; j < out_w - 8 - (size - 1); j += 8) {
                        int out_index = j + out_w*(i + out_h*(k + c*b));
                        __m256 max256 = _mm256_set1_ps(-FLT_MAX);
                        for (n = 0; n < size; ++n) {
                            for (m = 0; m < size; ++m) {
                                int cur_h = h_offset + i*stride + n;
                                int cur_w = w_offset + j*stride + m;
                                int index = cur_w + w*(cur_h + h*(k + b*c));
                                int valid = (cur_h >= 0 && cur_h < h &&
                                    cur_w >= 0 && cur_w < w);
                                if (!valid) continue;

                                __m256 src256 = _mm256_loadu_ps(&src[index]);
                                max256 = _mm256_max_ps(src256, max256);
                            }
                        }
                        _mm256_storeu_ps(&dst[out_index], max256);
                    }
                }
                else if (size == 2 && stride == 2 && is_avx() == 1) {
                    // 2x2/stride 2: load 8 inputs, reduce horizontal pairs,
                    // producing 4 outputs per iteration.
                    for (j = 0; j < out_w - 4; j += 4) {
                        int out_index = j + out_w*(i + out_h*(k + c*b));
                        //float max = -FLT_MAX;
                        //int max_i = -1;
                        __m128 max128 = _mm_set1_ps(-FLT_MAX);

                        for (n = 0; n < size; ++n) {
                            //for (m = 0; m < size; ++m)
                            m = 0;
                            {
                                int cur_h = h_offset + i*stride + n;
                                int cur_w = w_offset + j*stride + m;
                                int index = cur_w + w*(cur_h + h*(k + b*c));
                                int valid = (cur_h >= 0 && cur_h < h &&
                                    cur_w >= 0 && cur_w < w);
                                if (!valid) continue;

                                __m256 src256 = _mm256_loadu_ps(&src[index]);
                                __m256 src256_2 = _mm256_permute_ps(src256, (1 << 0) | (3 << 4));
                                __m256 max256 = _mm256_max_ps(src256, src256_2);

                                __m128 src128_0 = _mm256_extractf128_ps(max256, 0);
                                __m128 src128_1 = _mm256_extractf128_ps(max256, 1);
                                __m128 src128 = _mm_shuffle_ps(src128_0, src128_1, (2 << 2) | (2 << 6));

                                max128 = _mm_max_ps(src128, max128);
                            }
                        }
                        _mm_storeu_ps(&dst[out_index], max128);
                    }
                }

                // Scalar path: remaining columns, with max-index tracking.
                for (; j < out_w; ++j) {
                    int out_index = j + out_w*(i + out_h*(k + c*b));
                    float max = -FLT_MAX;
                    int max_i = -1;
                    for (n = 0; n < size; ++n) {
                        for (m = 0; m < size; ++m) {
                            int cur_h = h_offset + i*stride + n;
                            int cur_w = w_offset + j*stride + m;
                            int index = cur_w + w*(cur_h + h*(k + b*c));
                            int valid = (cur_h >= 0 && cur_h < h &&
                                cur_w >= 0 && cur_w < w);
                            float val = (valid != 0) ? src[index] : -FLT_MAX;
                            max_i = (val > max) ? index : max_i;
                            max = (val > max) ? val : max;
                        }
                    }
                    dst[out_index] = max;
                    if (indexes) indexes[out_index] = max_i;
                }
            }
        }
    }
}

#else   // AVX

// Scalar fallbacks used when AVX support is not compiled in.

int is_avx() { return 0; }
int is_fma_avx2() { return 0; }

// C = C + ALPHA * A * B  (A: MxK, B: KxN, C: MxN, all row-major,
// neither operand transposed).  Plain triple loop, single-threaded.
void gemm_nn(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int i, j, k;
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            PUT_IN_REGISTER float A_PART = ALPHA * A[i * lda + k];
            for (j = 0; j < N; ++j) {
                C[i*ldc + j] += A_PART*B[k*ldb + j];
            }
        }
    }
}

// Same computation as gemm_nn, but the outer loop over rows of C is
// parallelized with OpenMP (rows are independent, so no races on C).
void gemm_nn_fast(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int i, j, k;
    #pragma omp parallel for
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            PUT_IN_REGISTER float A_PART = ALPHA*A[i*lda + k];
            for (j = 0; j < N; ++j) {
                C[i*ldc + j] += A_PART*B[k*ldb + j];
            }
        }
    }
}

// Binary GEMM over 32-bit packed operands: each uint32_t holds 32 binarized
// values.  XNOR + popcount emulates 32 multiply-adds; the popcount is mapped
// from [0,32] to [-32,32] via (2*count - 32) and scaled by the per-row mean.
void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA,
    uint32_t *A, int lda,
    uint32_t *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < M; ++i) {   // l.n
        int j, s;
        float mean_val = mean_arr[i];
        //printf(" l.mean_arr[i] = %d \n ", l.mean_arr[i]);

        for (s = 0; s < K; ++s) // l.size*l.size*l.c/32  or (l.size*l.size*l.c)
        {
            //PUT_IN_REGISTER float A_PART = 1*a[i*k + s];
            PUT_IN_REGISTER uint32_t A_PART = A[i * lda + s];
            for (j = 0; j < N; ++j) // out_h*out_w;
            {
                //c[i*n + j] += A_PART*b[s*n + j];
                PUT_IN_REGISTER uint32_t B_PART = B[s * ldb + j];
                uint32_t xnor_result = ~(A_PART ^ B_PART);
                //printf(" xnor_result = %d, ", xnor_result);
                int32_t count = popcnt_32(xnor_result);  // must be Signed int

                C[i*ldc + j] += (2 * count - 32) * mean_val;
                //c[i*n + j] += count*mean;
            }
        }
    }
}

// Direct (non-im2col) 2-D convolution: accumulates into `output` for every
// filter/position, skipping out-of-bounds taps.  Parallelized over filters.
// NOTE(review): `mean` is accepted but never used in this body — confirm
// whether callers rely on that.
void convolution_2d(int w, int h, int ksize, int n, int c, int pad, int stride,
    float *weights, float *input, float *output, float *mean)
{
    const int out_h = (h + 2 * pad - ksize) / stride + 1;    // output_height=input_height for stride=1 and pad=1
    const int out_w = (w + 2 * pad - ksize) / stride + 1;    // output_width=input_width for stride=1 and pad=1
    //int i, f, j;

    int fil;
    // filter index
    #pragma omp parallel for      // "omp parallel for" - automatic parallelization of loop by using OpenMP
    for (fil = 0; fil < n; ++fil) {
        int chan, y, x, f_y, f_x;
        // channel index
        for (chan = 0; chan < c; ++chan)
            // input - y
            for (y = 0; y < h; ++y)
                // input - x
                for (x = 0; x < w; ++x)
                {
                    int const output_index = fil*w*h + y*w + x;
                    int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize;
                    int const input_pre_index = chan*w*h;
                    float sum = 0;

                    // filter - y
                    for (f_y = 0; f_y < ksize; ++f_y)
                    {
                        int input_y = y + f_y - pad;
                        // filter - x
                        for (f_x = 0; f_x < ksize; ++f_x)
                        {
                            int input_x = x + f_x - pad;
                            if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;

                            int input_index = input_pre_index + input_y*w + input_x;
                            int weights_index = weights_pre_index + f_y*ksize + f_x;

                            sum += input[input_index] * weights[weights_index];
                        }
                    }
                    // l.output[filters][width][height] +=
                    //        state.input[channels][width][height] *
                    //        l.weights[filters][channels][filter_width][filter_height];
                    output[output_index] += sum;
                }
    }
}

// Portable 64-bit population count: picks the MSVC intrinsic or the GCC
// builtin, splitting into two 32-bit counts on 32-bit targets.
static inline int popcnt_64(uint64_t val64) {
#ifdef WIN32  // Windows
#ifdef _WIN64 // Windows 64-bit
    int tmp_count = __popcnt64(val64);
#else         // Windows 32-bit
    int tmp_count = __popcnt(val64);
    tmp_count += __popcnt(val64 >> 32);
#endif
#else   // Linux
#if defined(__x86_64__) || defined(__aarch64__)  // Linux 64-bit
    int tmp_count = __builtin_popcountll(val64);
#else  // Linux 32-bit
    int tmp_count = __builtin_popcount(val64);
    tmp_count += __builtin_popcount(val64 >> 32);
#endif
#endif
    return tmp_count;
}

// Binary GEMM over bit-packed rows of A and (transposed) B, 64 bits per step:
// XNOR + popcount counts matching bits; a final K-remainder correction drops
// the bits beyond K, and the count is mapped to [-K, K] via (2*count - K) and
// scaled by the per-filter mean.  ALPHA_UNUSED is ignored.
// Overwrites C (plain assignment, no accumulate).
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < M; ++i) {   // l.n - filters [16 - 55 - 1024]
        int j, k;
        float mean_val = mean_arr[i];

        for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
            int count = 0;

            for (k = 0; k < K; k += 64) {   // l.size*l.size*l.c - one filter size [27 - 9216]
                uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8));
                uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8));
                uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);

                int tmp_count = popcnt_64(c_bit64);

                if (K - k < 64) tmp_count = tmp_count - (64 - (K - k));    // remove extra bits
                count += tmp_count;
                //binary_int64_printf(c_bit64);
                //printf(", count = %d \n\n", tmp_count);
            }

            C[i*ldc + j] = (2 * count - K) * mean_val;
        }
    }
}

// Non-AVX stub: the transposed/aligned im2col exists only in the AVX build.
void im2col_cpu_custom_transpose(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col, int ldb_align)
{
    printf("\n im2col_cpu_custom_transpose() isn't implemented without AVX \n");
}

//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu_custom(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col) { im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); return; int c; const int height_col = (height + 2 * pad - ksize) / stride + 1; const int width_col = (width + 2 * pad - ksize) / stride + 1; const int channels_col = channels * ksize * ksize; // optimized version if (height_col == height && width_col == width && stride == 1 && pad == 1) { #pragma omp parallel for for (c = 0; c < channels_col; ++c) { int h, w; int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = pad; h < height_col - pad; ++h) { for (w = pad; w < width_col - pad; ++w) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; } for (; w < width_col - pad; ++w) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; } } { w = 0; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { w = width_col - 1; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { h = 0; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { 
h = height_col - 1; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } } } else { //printf("\n Error: is no non-optimized version \n"); im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); } } //From Berkeley Vision's Caffe! //https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu_custom_bin(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col, int bit_align) { int c; const int height_col = (height + 2 * pad - ksize) / stride + 1; const int width_col = (width + 2 * pad - ksize) / stride + 1; const int channels_col = channels * ksize * ksize; // optimized version if (height_col == height && width_col == width && stride == 1 && pad == 1) { int new_ldb = bit_align; #pragma omp parallel for for (c = 0; c < channels_col; ++c) { int h, w; int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = pad; h < height_col - pad; ++h) { for (w = pad; w < width_col - pad - 8; w += 1) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; float val = data_im[im_col + width*(im_row + height*c_im)]; if (val > 0) set_bit((unsigned char*)data_col, col_index); } for (; w < width_col - pad; ++w) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; float val = data_im[im_col + width*(im_row + height*c_im)]; if (val > 0) set_bit((unsigned char*)data_col, col_index); } } { w = 0; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; 
//int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit((unsigned char*)data_col, col_index); } } { w = width_col - 1; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit((unsigned char*)data_col, col_index); } } { h = 0; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit((unsigned char*)data_col, col_index); } } { h = height_col - 1; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit((unsigned char*)data_col, col_index); } } } } else { printf("\n Error: is no non-optimized version \n"); //im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin // float_to_bit(b, t_input, src_size); // 
transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8); } } void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a) { int i; if (a == LINEAR) { } else if (a == LEAKY) { for (i = 0; i < n; ++i) { x[i] = (x[i]>0) ? x[i] : .1*x[i]; } } else { for (i = 0; i < n; ++i) { x[i] = activate(x[i], a); } } } void float_to_bit(float *src, unsigned char *dst, size_t size) { size_t dst_size = size / 8 + 1; memset(dst, 0, dst_size); size_t i; char* byte_arr = (char*)xcalloc(size, sizeof(char)); for (i = 0; i < size; ++i) { if (src[i] > 0) byte_arr[i] = 1; } //for (i = 0; i < size; ++i) { // dst[i / 8] |= byte_arr[i] << (i % 8); //} for (i = 0; i < size; i += 8) { char dst_tmp = 0; dst_tmp |= byte_arr[i + 0] << 0; dst_tmp |= byte_arr[i + 1] << 1; dst_tmp |= byte_arr[i + 2] << 2; dst_tmp |= byte_arr[i + 3] << 3; dst_tmp |= byte_arr[i + 4] << 4; dst_tmp |= byte_arr[i + 5] << 5; dst_tmp |= byte_arr[i + 6] << 6; dst_tmp |= byte_arr[i + 7] << 7; dst[i / 8] = dst_tmp; } free(byte_arr); } static inline void transpose_scalar_block(float *A, float *B, const int lda, const int ldb, const int block_size) { int i; //#pragma omp parallel for for (i = 0; i<block_size; i++) { int j; for (j = 0; j<block_size; j++) { B[j*ldb + i] = A[i*lda + j]; } } } void transpose_block_SSE4x4(float *A, float *B, const int n, const int m, const int lda, const int ldb, const int block_size) { int i; #pragma omp parallel for for (i = 0; i < n; i += block_size) { int j, i2, j2; for (j = 0; j < m; j += block_size) { int max_i2 = i + block_size < n ? i + block_size : n; int max_j2 = j + block_size < m ? 
j + block_size : m; for (i2 = i; i2 < max_i2; ++i2) { for (j2 = j; j2 < max_j2; ++j2) { B[j2*ldb + i2] = A[i2*lda + j2]; } } } } } void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c, int pad, int stride, int batch) { int b, k; const int w_offset = -pad / 2; const int h_offset = -pad / 2; for (b = 0; b < batch; ++b) { #pragma omp parallel for for (k = 0; k < c; ++k) { int i, j, m, n; for (i = 0; i < out_h; ++i) { for (j = 0; j < out_w; ++j) { int out_index = j + out_w*(i + out_h*(k + c*b)); float max = -FLT_MAX; int max_i = -1; for (n = 0; n < size; ++n) { for (m = 0; m < size; ++m) { int cur_h = h_offset + i*stride + n; int cur_w = w_offset + j*stride + m; int index = cur_w + w*(cur_h + h*(k + b*c)); int valid = (cur_h >= 0 && cur_h < h && cur_w >= 0 && cur_w < w); float val = (valid != 0) ? src[index] : -FLT_MAX; max_i = (val > max) ? index : max_i; max = (val > max) ? val : max; } } dst[out_index] = max; if (indexes) indexes[out_index] = max_i; } } } } } #endif // AVX // 32 channels -> 1 channel (with 32 floats) // 256 channels -> 8 channels (with 32 floats) void repack_input(float *input, float *re_packed_input, int w, int h, int c) { const int items_per_channel = w * h; int chan, i; for (chan = 0; chan < c; chan += 32) { for (i = 0; i < items_per_channel; ++i) { int c_pack; for (c_pack = 0; c_pack < 32; ++c_pack) { float src = input[(chan + c_pack)*items_per_channel + i]; re_packed_input[chan*items_per_channel + i * 32 + c_pack] = src; } } } } void transpose_uint32(uint32_t *src, uint32_t *dst, int src_h, int src_w, int src_align, int dst_align) { //l.bit_align - algined (n) by 32 //new_ldb - aligned (k) by 256 int i; //#pragma omp parallel for for (i = 0; i < src_h; i += 1) // l.size*l.size*l.c; { int j; for (j = 0; j < src_w; j += 1) // out_h*out_w; { ((uint32_t *)dst)[j*dst_align / 32 + i] = ((uint32_t *)src)[i*src_align + j]; } } } void gemm_nn_bin_transposed_32bit_packed(int M, int N, 
int K, float ALPHA, uint32_t *A, int lda, uint32_t *B, int ldb, float *C, int ldc, float *mean_arr) { int i; #pragma omp parallel for for (i = 0; i < M; ++i) { // l.n int j, s; float mean_val = mean_arr[i]; for (j = 0; j < N; ++j) // out_h*out_w; { float val = 0; for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c) { PUT_IN_REGISTER uint32_t A_PART = ((uint32_t*)A)[i*lda + s]; PUT_IN_REGISTER uint32_t B_PART = ((uint32_t*)B)[j * ldb + s]; uint32_t xnor_result = ~(A_PART ^ B_PART); int32_t count = popcnt_32(xnor_result); // must be Signed int val += (2 * count - 32) * mean_val; } C[i*ldc + j] += val; } } } void convolution_repacked(uint32_t *packed_input, uint32_t *packed_weights, float *output, int w, int h, int c, int n, int size, int pad, int new_lda, float *mean_arr) { int fil; // filter index #pragma omp parallel for for (fil = 0; fil < n; ++fil) { float mean_val = mean_arr[fil]; int chan, y, x, f_y, f_x; // c_pack // channel index for (chan = 0; chan < c / 32; ++chan) //for (chan = 0; chan < l.c; chan += 32) //for (c_pack = 0; c_pack < 32; ++c_pack) // input - y for (y = 0; y < h; ++y) // input - x for (x = 0; x < w; ++x) { int const output_index = fil*w*h + y*w + x; float sum = 0; // filter - y for (f_y = 0; f_y < size; ++f_y) { int input_y = y + f_y - pad; // filter - x for (f_x = 0; f_x < size; ++f_x) { int input_x = x + f_x - pad; if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue; // normal //float input = state.input[(chan + c_pack)*l.w*l.h + input_y*l.w + input_x]; //float weight = l.weights[fil*l.c*l.size*l.size + (chan + c_pack)*l.size*l.size + f_y*l.size + f_x]; // packed //float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack]; //float weight = l.weights[fil*l.c*l.size*l.size + chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack]; //sum += input * weight; //float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack]; //float weight = 
l.weights[fil*l.c*l.size*l.size + chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack]; //uint32_t bit1 = input > 0; //uint32_t bit2 = weight > 0; //uint32_t count = (~(bit1 ^ bit2)) & 1; //float result = (2 * (float)count - 1) * mean_val; //printf("\n mul = %f, bit1 = %d, bit2 = %d, count = %d, mean = %f, result = %f ", input*weight, bit1, bit2, count, mean_val, result); //sum += result; uint32_t input = ((uint32_t *)packed_input)[chan*w*h + input_y*w + input_x]; //uint32_t weight = ((uint32_t *)l.align_bit_weights)[fil*l.c*l.size*l.size/32 + chan*l.size*l.size + f_y*l.size + f_x]; uint32_t weight = ((uint32_t *)packed_weights)[fil*new_lda / 32 + chan*size*size + f_y*size + f_x]; uint32_t xnor_result = ~(input ^ weight); int32_t count = popcnt_32(xnor_result); // mandatory Signed int sum += (2 * count - 32) * mean_val; } } // l.output[filters][width][height] += // state.input[channels][width][height] * // l.weights[filters][channels][filter_width][filter_height]; output[output_index] += sum; } } } void gemm_nt(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; for(i = 0; i < M; ++i){ for(j = 0; j < N; ++j){ PUT_IN_REGISTER float sum = 0; for(k = 0; k < K; ++k){ sum += ALPHA*A[i*lda+k]*B[j*ldb + k]; } C[i*ldc+j] += sum; } } } void gemm_tn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; for(i = 0; i < M; ++i){ for(k = 0; k < K; ++k){ PUT_IN_REGISTER float A_PART = ALPHA * A[k * lda + i]; for(j = 0; j < N; ++j){ C[i*ldc+j] += A_PART*B[k*ldb+j]; } } } } void gemm_tt(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; for(i = 0; i < M; ++i){ for(j = 0; j < N; ++j){ PUT_IN_REGISTER float sum = 0; for(k = 0; k < K; ++k){ sum += ALPHA*A[i+k*lda]*B[k+j*ldb]; } C[i*ldc+j] += sum; } } } void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int 
ldc) { //printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc); if (BETA != 1){ int i, j; for(i = 0; i < M; ++i){ for(j = 0; j < N; ++j){ C[i*ldc + j] *= BETA; } } } is_avx(); // initialize static variable if (is_fma_avx2() && !TA && !TB) { gemm_nn_fast(M, N, K, ALPHA, A, lda, B, ldb, C, ldc); } else { int t; #pragma omp parallel for for (t = 0; t < M; ++t) { if (!TA && !TB) gemm_nn(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc); else if (TA && !TB) gemm_tn(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc); else if (!TA && TB) gemm_nt(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc); else gemm_tt(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc); } } } #ifdef GPU #include <math.h> void gemm_ongpu(int TA, int TB, int M, int N, int K, float ALPHA, float *A_gpu, int lda, float *B_gpu, int ldb, float BETA, float *C_gpu, int ldc) { cublasHandle_t handle = blas_handle(); cudaError_t stream_status = (cudaError_t)cublasSetStream(handle, get_cuda_stream()); CHECK_CUDA(stream_status); cudaError_t status = (cudaError_t)cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N), (TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc); CHECK_CUDA(status); } void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc) { float *A_gpu = cuda_make_array(A, (TA ? lda*K:lda*M)); float *B_gpu = cuda_make_array(B, (TB ? 
ldb*N : ldb*K)); float *C_gpu = cuda_make_array(C, ldc*M); gemm_ongpu(TA, TB, M, N, K, ALPHA, A_gpu, lda, B_gpu, ldb, BETA, C_gpu, ldc); cuda_pull_array(C_gpu, C, ldc*M); cuda_free(A_gpu); cuda_free(B_gpu); cuda_free(C_gpu); } #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> void time_gpu_random_matrix(int TA, int TB, int m, int k, int n) { float *a; if(!TA) a = random_matrix(m,k); else a = random_matrix(k,m); int lda = (!TA)?k:m; float *b; if(!TB) b = random_matrix(k,n); else b = random_matrix(n,k); int ldb = (!TB)?n:k; float *c = random_matrix(m,n); int i; clock_t start = clock(), end; for(i = 0; i<32; ++i){ gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n); } end = clock(); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC); free(a); free(b); free(c); } void time_ongpu(int TA, int TB, int m, int k, int n) { int iter = 10; float *a = random_matrix(m,k); float *b = random_matrix(k,n); int lda = (!TA)?k:m; int ldb = (!TB)?n:k; float *c = random_matrix(m,n); float *a_cl = cuda_make_array(a, m*k); float *b_cl = cuda_make_array(b, k*n); float *c_cl = cuda_make_array(c, m*n); int i; clock_t start = clock(), end; for(i = 0; i<iter; ++i){ gemm_ongpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n); cudaDeviceSynchronize(); } double flop = ((double)m)*n*(2.*k + 2.)*iter; double gflop = flop/pow(10., 9); end = clock(); double seconds = sec(end-start); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds); cuda_free(a_cl); cuda_free(b_cl); cuda_free(c_cl); free(a); free(b); free(c); } void test_gpu_accuracy(int TA, int TB, int m, int k, int n) { srand(0); float *a; if(!TA) a = random_matrix(m,k); else a = random_matrix(k,m); int lda = (!TA)?k:m; float *b; if(!TB) b = random_matrix(k,n); else b = random_matrix(n,k); int ldb = (!TB)?n:k; float *c = random_matrix(m,n); float *c_gpu = random_matrix(m,n); memset(c, 0, 
m*n*sizeof(float)); memset(c_gpu, 0, m*n*sizeof(float)); int i; //pm(m,k,b); gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n); //printf("GPU\n"); //pm(m, n, c_gpu); gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n); //printf("\n\nCPU\n"); //pm(m, n, c); double sse = 0; for(i = 0; i < m*n; ++i) { //printf("%f %f\n", c[i], c_gpu[i]); sse += pow(c[i]-c_gpu[i], 2); } printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n)); free(a); free(b); free(c); free(c_gpu); } int test_gpu_blas() { /* test_gpu_accuracy(0,0,10,576,75); test_gpu_accuracy(0,0,17,10,10); test_gpu_accuracy(1,0,17,10,10); test_gpu_accuracy(0,1,17,10,10); test_gpu_accuracy(1,1,17,10,10); test_gpu_accuracy(0,0,1000,10,100); test_gpu_accuracy(1,0,1000,10,100); test_gpu_accuracy(0,1,1000,10,100); test_gpu_accuracy(1,1,1000,10,100); test_gpu_accuracy(0,0,10,10,10); time_ongpu(0,0,64,2916,363); time_ongpu(0,0,64,2916,363); time_ongpu(0,0,64,2916,363); time_ongpu(0,0,192,729,1600); time_ongpu(0,0,384,196,1728); time_ongpu(0,0,256,196,3456); time_ongpu(0,0,256,196,2304); time_ongpu(0,0,128,4096,12544); time_ongpu(0,0,128,4096,4096); */ time_ongpu(0,0,64,75,12544); time_ongpu(0,0,64,75,12544); time_ongpu(0,0,64,75,12544); time_ongpu(0,0,64,576,12544); time_ongpu(0,0,256,2304,784); time_ongpu(1,1,2304,256,784); time_ongpu(0,0,512,4608,196); time_ongpu(1,1,4608,512,196); return 0; } #endif void init_cpu() { is_avx(); is_fma_avx2(); }
FirstDerivativeFourthOrder.c
/*! @file FirstDerivativeFourthOrder.c
    @author Debojyoti Ghosh
    @brief Fourth order finite-difference approximation to the first derivative
*/

#include <stdio.h>
#include <stdlib.h>
#include <basic.h>
#include <arrayfunctions.h>
#include <firstderivative.h>
#include <mpivars.h>
#include <hypar.h>

typedef MPIVariables  MPIContext;
typedef HyPar         SolverContext;

#ifdef with_omp
#include <omp.h>
#endif

/*! Computes the fourth-order finite-difference approximation to the first derivative
    (\b Note: not divided by the grid spacing):
    \f{equation}{
      \left(\partial f\right)_i = \left\{ \begin{array}{ll} \frac{1}{12}\left(-25f_i+48f_{i+1}-36f_{i+2}+16f_{i+3}-3f_{i+4}\right) & i=-g \\ \frac{1}{12}\left(-3f_{i-1}-10f_i+18f_{i+1}-6f_{i+2}+f_{i+3}\right) & i = -g+1 \\ \frac{1}{12}\left( f_{i-2}-8f_{i-1}+8f_{i+1}-f_{i+2} \right) & -g+2 \leq i \leq N+g-3 \\ \frac{1}{12}\left( -f_{i-3}+6f_{i-2}-18f_{i-1}+10f_i+3f_{i+1}\right) & i = N+g-2 \\ \frac{1}{12}\left( 3f_{i-4}-16f_{i-3}+36f_{i-2}-48f_{i-1}+25f_i \right) & i = N+g-1 \end{array}\right.
    \f}
    where \f$i\f$ is the grid index along the spatial dimension of the derivative,
    \f$g\f$ is the number of ghost points, and \f$N\f$ is the number of grid points
    (not including the ghost points) in the spatial dimension of the derivative.
    (The interior stencil prefactor is \f$1/12\f$, matching the code's \c one_twelve.)
    \n\n
    Notes:
    + The first derivative is computed at the grid points or the cell centers.
    + The first derivative is computed at the ghost points too. Thus, biased schemes
      are used at and near the boundaries.
    + \b Df and \b f are 1D arrays containing the function and its computed
      derivatives on a multi-dimensional grid. The derivative along the specified
      dimension \b dir is computed by looping through all grid lines along \b dir.
*/ int FirstDerivativeFourthOrderCentral( double *Df, /*!< Array to hold the computed first derivative (with ghost points) */ double *f, /*!< Array containing the grid point function values whose first derivative is to be computed (with ghost points) */ int dir, /*!< The spatial dimension along which the derivative is computed */ int bias, /*!< Forward or backward differencing for non-central finite-difference schemes (-1: backward, 1: forward)*/ void *s, /*!< Solver object of type #SolverContext */ void *m /*!< MPI object of type #MPIContext */ ) { SolverContext *solver = (SolverContext*) s; int i, j, v; int ghosts = solver->ghosts; int ndims = solver->ndims; int nvars = solver->nvars; int *dim = solver->dim_local; if ((!Df) || (!f)) { fprintf(stderr, "Error in FirstDerivativeFourthOrder(): input arrays not allocated.\n"); return(1); } static double one_twelve = 1.0/12.0; /* create index and bounds for the outer loop, i.e., to loop over all 1D lines along dimension "dir" */ int indexC[ndims], index_outer[ndims], bounds_outer[ndims]; _ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] = 1; int N_outer; _ArrayProduct1D_(bounds_outer,ndims,N_outer); #pragma omp parallel for schedule(auto) default(shared) private(i,j,v,index_outer,indexC) for (j=0; j<N_outer; j++) { _ArrayIndexnD_(ndims,j,bounds_outer,index_outer,0); _ArrayCopy1D_(index_outer,indexC,ndims); /* left boundary */ for (i = -ghosts; i < -ghosts+1; i++) { int qC, qp1, qp2, qp3, qp4; indexC[dir] = i ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qC ); indexC[dir] = i+1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp1); indexC[dir] = i+2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp2); indexC[dir] = i+3; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp3); indexC[dir] = i+4; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp4); for (v=0; v<nvars; v++) Df[qC*nvars+v] = (-25*f[qC*nvars+v]+48*f[qp1*nvars+v]-36*f[qp2*nvars+v]+16*f[qp3*nvars+v]-3*f[qp4*nvars+v])*one_twelve; } for (i = -ghosts+1; i < -ghosts+2; i++) { int qC, qm1, qp1, qp2, 
qp3; indexC[dir] = i-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1); indexC[dir] = i ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qC ); indexC[dir] = i+1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp1); indexC[dir] = i+2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp2); indexC[dir] = i+3; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp3); for (v=0; v<nvars; v++) Df[qC*nvars+v] = (-3*f[qm1*nvars+v]-10*f[qC*nvars+v]+18*f[qp1*nvars+v]-6*f[qp2*nvars+v]+f[qp3*nvars+v])*one_twelve; } /* interior */ for (i = -ghosts+2; i < dim[dir]+ghosts-2; i++) { int qC, qm1, qm2, qp1, qp2; indexC[dir] = i-2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm2); indexC[dir] = i-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1); indexC[dir] = i ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qC ); indexC[dir] = i+1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp1); indexC[dir] = i+2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp2); for (v=0; v<nvars; v++) Df[qC*nvars+v] = (f[qm2*nvars+v]-8*f[qm1*nvars+v]+8*f[qp1*nvars+v]-f[qp2*nvars+v])*one_twelve; } /* right boundary */ for (i = dim[dir]+ghosts-2; i < dim[dir]+ghosts-1; i++) { int qC, qm3, qm2, qm1, qp1; indexC[dir] = i-3; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm3); indexC[dir] = i-2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm2); indexC[dir] = i-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1); indexC[dir] = i ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qC ); indexC[dir] = i+1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp1); for (v=0; v<nvars; v++) Df[qC*nvars+v] = (-f[qm3*nvars+v]+6*f[qm2*nvars+v]-18*f[qm1*nvars+v]+10*f[qC*nvars+v]+3*f[qp1*nvars+v])*one_twelve; } for (i = dim[dir]+ghosts-1; i < dim[dir]+ghosts; i++) { int qC, qm4, qm3, qm2, qm1; indexC[dir] = i-4; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm4); indexC[dir] = i-3; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm3); indexC[dir] = i-2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm2); indexC[dir] = i-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1); indexC[dir] = i ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qC ); for (v=0; v<nvars; v++) 
Df[qC*nvars+v] = (3*f[qm4*nvars+v]-16*f[qm3*nvars+v]+36*f[qm2*nvars+v]-48*f[qm1*nvars+v]+25*f[qC*nvars+v])*one_twelve; } } return(0); }
XT_ICD_update.c
/* =========================================================================== * Copyright (c) 2013 K. Aditya Mohan (Purdue University) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this * list of conditions and the following disclaimer in the documentation and/or * other materials provided with the distribution. * * Neither the name of K. Aditya Mohan, Purdue * University, nor the names of its contributors may be used * to endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include "XT_Constants.h" #include <stdio.h> #include <math.h> #include <stdlib.h> #include "allocate.h" #include "randlib.h" #include <time.h> #include "XT_AMatrix.h" #include "XT_Profile.h" #include "XT_Structures.h" #include "XT_IOMisc.h" /*#include "XT_NHICD.h"*/ #include "omp.h" /*#include "XT_MPI.h"*/ /*#include <mpi.h>*/ #include "XT_VoxUpdate.h" #include "XT_ForwardProject.h" /*#include "XT_MPIIO.h"*/ #include "XT_Debug.h" #include "XT_OffsetError.h" #include "XT_Prior.h" #include "XT_Search.h" #include "XT_CmplxArith.h" #include "XT_CmplxProjEst.h" #include "XT_ObjectInit.h" #include "XT_MagElecDen.h" #include "XT_DensityUpdate.h" int32_t initErrorSinogam(Sinogram* SinoPtr, ScannedObject* ObjPtr, TomoInputs* InpPtr, FFTStruct* fftptr); int updateVoxelsTimeSlices(Sinogram* SinoPtr, ScannedObject* ObjPtr, TomoInputs* InpPtr, int32_t Iter, uint8_t** Mask); /*computes the location of (i,j,k) th element in a 1D array*/ int32_t array_loc_1D (int32_t i, int32_t j, int32_t k, int32_t N_j, int32_t N_k) { return (i*N_j*N_k + j*N_k + k); } /*computes the value of cost function. 
'ErrorSino' is the error sinogram*/ Real_t computeCost(Sinogram* SinoPtr, ScannedObject* ObjPtr, TomoInputs* InpPtr) { Real_t cost=0, temp=0, forward=0, prior=0, detdist_r; int32_t i,j,k,p,sino_idx,slice; AMatrixCol AMatrixPtr_X, AMatrixPtr_Y; uint8_t AvgNumXElements = (uint8_t)ceil(3*ObjPtr->delta_x/SinoPtr->delta_r); uint8_t AvgNumYElements = (uint8_t)ceil(3*ObjPtr->delta_y/SinoPtr->delta_r); Real_arr_t ***ErrSino_Flip_x, ***ErrSino_Flip_y, ***ErrSino_Unflip_x, ***ErrSino_Unflip_y; AMatrixPtr_X.values = (Real_t*)get_spc(AvgNumXElements, sizeof(Real_t)); AMatrixPtr_X.index = (int32_t*)get_spc(AvgNumXElements, sizeof(int32_t)); AMatrixPtr_Y.values = (Real_t*)get_spc(AvgNumYElements, sizeof(Real_t)); AMatrixPtr_Y.index = (int32_t*)get_spc(AvgNumYElements, sizeof(int32_t)); ErrSino_Unflip_x = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, SinoPtr->Nx_p, SinoPtr->N_r, SinoPtr->N_t); ErrSino_Unflip_y = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, SinoPtr->Ny_p, SinoPtr->N_r, SinoPtr->N_t); memset(&(ErrSino_Unflip_x[0][0][0]), 0, SinoPtr->Nx_p*SinoPtr->N_t*SinoPtr->N_r*sizeof(Real_arr_t)); memset(&(ErrSino_Unflip_y[0][0][0]), 0, SinoPtr->Ny_p*SinoPtr->N_t*SinoPtr->N_r*sizeof(Real_arr_t)); #ifdef VFET_ELEC_RECON ErrSino_Flip_x = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, SinoPtr->Nx_p, SinoPtr->N_r, SinoPtr->N_t); ErrSino_Flip_y = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, SinoPtr->Ny_p, SinoPtr->N_r, SinoPtr->N_t); memset(&(ErrSino_Flip_x[0][0][0]), 0, SinoPtr->Nx_p*SinoPtr->N_t*SinoPtr->N_r*sizeof(Real_arr_t)); memset(&(ErrSino_Flip_y[0][0][0]), 0, SinoPtr->Ny_p*SinoPtr->N_t*SinoPtr->N_r*sizeof(Real_arr_t)); #endif /* #pragma omp parallel for private(j, k, sino_idx, slice)*/ for (slice=0; slice<ObjPtr->N_z; slice++){ for (j=0; j<ObjPtr->N_y; j++) { for (k=0; k<ObjPtr->N_x; k++){ for (sino_idx=0; sino_idx < SinoPtr->Nx_p; sino_idx++){ detdist_r = (ObjPtr->y0 + ((Real_t)j+0.5)*ObjPtr->delta_y)*SinoPtr->cosine_x[sino_idx]; detdist_r += -(ObjPtr->z0 + 
((Real_t)slice+0.5)*ObjPtr->delta_z)*SinoPtr->sine_x[sino_idx]; calcAMatrixColumnforAngle(SinoPtr, ObjPtr, SinoPtr->DetectorResponse_x[sino_idx], &(AMatrixPtr_X), detdist_r); mag_forward_project_voxel (SinoPtr, InpPtr, ObjPtr->MagPotentials[slice][j][k][0], ObjPtr->MagPotentials[slice][j][k][1], ErrSino_Unflip_x, ErrSino_Flip_x, &(AMatrixPtr_X), &(ObjPtr->VoxelLineResp_X[k]), sino_idx, SinoPtr->cosine_x[sino_idx], SinoPtr->sine_x[sino_idx]); } for (sino_idx=0; sino_idx < SinoPtr->Ny_p; sino_idx++){ detdist_r = (ObjPtr->x0 + ((Real_t)k+0.5)*ObjPtr->delta_x)*SinoPtr->cosine_y[sino_idx]; detdist_r += -(ObjPtr->z0 + ((Real_t)slice+0.5)*ObjPtr->delta_z)*SinoPtr->sine_y[sino_idx]; calcAMatrixColumnforAngle(SinoPtr, ObjPtr, SinoPtr->DetectorResponse_y[sino_idx], &(AMatrixPtr_Y), detdist_r); /* printf("count = %d, idx = %d, val = %f\n", VoxelLineResponse[slice].count, VoxelLineResponse[slice].index[0], VoxelLineResponse[slice].values[0]);*/ mag_forward_project_voxel (SinoPtr, InpPtr, ObjPtr->MagPotentials[slice][j][k][0], ObjPtr->MagPotentials[slice][j][k][2], ErrSino_Unflip_y, ErrSino_Flip_y, &(AMatrixPtr_Y), &(ObjPtr->VoxelLineResp_Y[j]), sino_idx, SinoPtr->cosine_y[sino_idx], SinoPtr->sine_y[sino_idx]); } } } } free(AMatrixPtr_X.values); free(AMatrixPtr_X.index); free(AMatrixPtr_Y.values); free(AMatrixPtr_Y.index); #pragma omp parallel for private(j, k, temp) reduction(+:forward) for (i = 0; i < SinoPtr->Nx_p; i++) for (j = 0; j < SinoPtr->N_r; j++) for (k = 0; k < SinoPtr->N_t; k++) { temp = (SinoPtr->Data_Unflip_x[i][j][k] - ErrSino_Unflip_x[i][j][k]); forward += temp*temp*InpPtr->Weight; } #pragma omp parallel for private(j, k, temp) reduction(+:forward) for (i = 0; i < SinoPtr->Ny_p; i++) for (j = 0; j < SinoPtr->N_r; j++) for (k = 0; k < SinoPtr->N_t; k++) { temp = (SinoPtr->Data_Unflip_y[i][j][k] - ErrSino_Unflip_y[i][j][k]); forward += temp*temp*InpPtr->Weight; } forward /= 2.0; multifree(ErrSino_Unflip_x, 3); multifree(ErrSino_Unflip_y, 3); /*When computing the 
cost of the prior term it is important to make sure that you don't include the cost of any pair of neighbors more than once. In this code, a certain sense of causality is used to compute the cost. We also assume that the weghting kernel given by 'Filter' is symmetric. Let i, j and k correspond to the three dimensions. If we go forward to i+1, then all neighbors at j-1, j, j+1, k+1, k, k-1 are to be considered. However, if for the same i, if we go forward to j+1, then all k-1, k, and k+1 should be considered. For same i and j, only the neighbor at k+1 is considred.*/ prior = 0; for (p = 0; p < ObjPtr->N_z; p++) for (j = 0; j < ObjPtr->N_y; j++) for (k = 0; k < ObjPtr->N_x; k++) { temp = ObjPtr->ErrorPotMag[p][j][k][0]*ObjPtr->ErrorPotMag[p][j][k][0]; temp += ObjPtr->ErrorPotMag[p][j][k][1]*ObjPtr->ErrorPotMag[p][j][k][1]; temp += ObjPtr->ErrorPotMag[p][j][k][2]*ObjPtr->ErrorPotMag[p][j][k][2]; prior += InpPtr->ADMM_mu*temp/2; } check_info(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "Forward cost = %f\n",forward); check_info(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "Prior cost = %f\n",prior); InpPtr->Forward_Cost = forward; InpPtr->Prior_Cost = prior; cost = forward + prior; return cost; } /*computes the value of cost function. 
'ErrorSino' is the error sinogram*/
/* Computes the cost of the ORIGINAL (un-split) problem: the magnetization is
 * converted to magnetic potentials and re-projected from scratch, then a
 * weighted least-squares data term plus a QGGMRF prior on the magnetization
 * field are accumulated. Used for monitoring convergence. */
Real_t compute_orig_cost(Sinogram* SinoPtr, ScannedObject* ObjPtr, TomoInputs* InpPtr, FFTStruct* fftptr)
{
  Real_t cost=0, temp=0, forward=0, prior=0, detdist_r;
  Real_t Diff;
  /* NOTE(review): ErrSino_Flip_x/y and ElecPotentials are never allocated in
   * this function; the flip pointers are handed to mag_forward_project_voxel()
   * uninitialized - presumably ignored in this build configuration (TODO
   * confirm against mag_forward_project_voxel). */
  Real_arr_t ***ErrSino_Flip_x, ***ErrSino_Flip_y, ***ElecPotentials;
  int32_t p,i,j,k,cidx,sino_idx,slice;
  bool j_minus, k_minus, j_plus, k_plus, p_plus;
  AMatrixCol AMatrixPtr_X, AMatrixPtr_Y;
  /* worst-case number of detector bins one voxel projects onto (3 voxel widths) */
  uint8_t AvgNumXElements = (uint8_t)ceil(3*ObjPtr->delta_x/SinoPtr->delta_r);
  uint8_t AvgNumYElements = (uint8_t)ceil(3*ObjPtr->delta_y/SinoPtr->delta_r);

  AMatrixPtr_X.values = (Real_t*)get_spc(AvgNumXElements, sizeof(Real_t));
  AMatrixPtr_X.index = (int32_t*)get_spc(AvgNumXElements, sizeof(int32_t));
  AMatrixPtr_Y.values = (Real_t*)get_spc(AvgNumYElements, sizeof(Real_t));
  AMatrixPtr_Y.index = (int32_t*)get_spc(AvgNumYElements, sizeof(int32_t));

  Real_arr_t*** ErrSino_Unflip_x = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, SinoPtr->Nx_p, SinoPtr->N_r, SinoPtr->N_t);
  Real_arr_t*** ErrSino_Unflip_y = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, SinoPtr->Ny_p, SinoPtr->N_r, SinoPtr->N_t);
  Real_arr_t**** MagPotentials = (Real_arr_t****)multialloc(sizeof(Real_arr_t), 4, ObjPtr->N_z, ObjPtr->N_y, ObjPtr->N_x, 3);
  memset(&(ErrSino_Unflip_x[0][0][0]), 0, SinoPtr->Nx_p*SinoPtr->N_t*SinoPtr->N_r*sizeof(Real_arr_t));
  memset(&(ErrSino_Unflip_y[0][0][0]), 0, SinoPtr->Ny_p*SinoPtr->N_t*SinoPtr->N_r*sizeof(Real_arr_t));
  memset(&(MagPotentials[0][0][0][0]), 0, ObjPtr->N_z*ObjPtr->N_y*ObjPtr->N_x*3*sizeof(Real_arr_t));

  /* magnetization -> magnetic potentials (FFT-based cross-product transform) */
  compute_magcrossprodtran (ObjPtr->Magnetization, MagPotentials, ObjPtr->MagFilt, fftptr, ObjPtr->N_z, ObjPtr->N_y, ObjPtr->N_x, 1);

/* #pragma omp parallel for private(j, k, sino_idx, slice)*/
  for (slice=0; slice<ObjPtr->N_z; slice++){
  for (j=0; j<ObjPtr->N_y; j++)
  {
    for (k=0; k<ObjPtr->N_x; k++){
      /* project this voxel onto every x-tilt view */
      for (sino_idx=0; sino_idx < SinoPtr->Nx_p; sino_idx++){
        detdist_r = (ObjPtr->y0 + ((Real_t)j+0.5)*ObjPtr->delta_y)*SinoPtr->cosine_x[sino_idx];
        detdist_r += -(ObjPtr->z0 + ((Real_t)slice+0.5)*ObjPtr->delta_z)*SinoPtr->sine_x[sino_idx];
        calcAMatrixColumnforAngle(SinoPtr, ObjPtr, SinoPtr->DetectorResponse_x[sino_idx], &(AMatrixPtr_X), detdist_r);
        mag_forward_project_voxel (SinoPtr, InpPtr, MagPotentials[slice][j][k][0], MagPotentials[slice][j][k][1], ErrSino_Unflip_x, ErrSino_Flip_x, &(AMatrixPtr_X), &(ObjPtr->VoxelLineResp_X[k]), sino_idx, SinoPtr->cosine_x[sino_idx], SinoPtr->sine_x[sino_idx]);
      }
      /* project this voxel onto every y-tilt view */
      for (sino_idx=0; sino_idx < SinoPtr->Ny_p; sino_idx++){
        detdist_r = (ObjPtr->x0 + ((Real_t)k+0.5)*ObjPtr->delta_x)*SinoPtr->cosine_y[sino_idx];
        detdist_r += -(ObjPtr->z0 + ((Real_t)slice+0.5)*ObjPtr->delta_z)*SinoPtr->sine_y[sino_idx];
        calcAMatrixColumnforAngle(SinoPtr, ObjPtr, SinoPtr->DetectorResponse_y[sino_idx], &(AMatrixPtr_Y), detdist_r);
        /* printf("count = %d, idx = %d, val = %f\n", VoxelLineResponse[slice].count, VoxelLineResponse[slice].index[0], VoxelLineResponse[slice].values[0]);*/
        mag_forward_project_voxel (SinoPtr, InpPtr, MagPotentials[slice][j][k][0], MagPotentials[slice][j][k][2], ErrSino_Unflip_y, ErrSino_Flip_y, &(AMatrixPtr_Y), &(ObjPtr->VoxelLineResp_Y[j]), sino_idx, SinoPtr->cosine_y[sino_idx], SinoPtr->sine_y[sino_idx]);
      }
    }
  }
  }

  free(AMatrixPtr_X.values);
  free(AMatrixPtr_X.index);
  free(AMatrixPtr_Y.values);
  free(AMatrixPtr_Y.index);

  /* weighted least-squares data term over both tilt series */
  #pragma omp parallel for private(j, k, temp) reduction(+:forward)
  for (i = 0; i < SinoPtr->Nx_p; i++)
  for (j = 0; j < SinoPtr->N_r; j++)
  for (k = 0; k < SinoPtr->N_t; k++)
  {
    temp = (SinoPtr->Data_Unflip_x[i][j][k] - ErrSino_Unflip_x[i][j][k]);
    forward += temp*temp*InpPtr->Weight;
  }
  #pragma omp parallel for private(j, k, temp) reduction(+:forward)
  for (i = 0; i < SinoPtr->Ny_p; i++)
  for (j = 0; j < SinoPtr->N_r; j++)
  for (k = 0; k < SinoPtr->N_t; k++)
  {
    temp = (SinoPtr->Data_Unflip_y[i][j][k] - ErrSino_Unflip_y[i][j][k]);
    forward += temp*temp*InpPtr->Weight;
  }
  forward /= 2.0;

  multifree(ErrSino_Unflip_x, 3);
  multifree(ErrSino_Unflip_y, 3);
  multifree(MagPotentials, 4);

  /* QGGMRF prior. To count each neighbor pair exactly once, only "causal"
   * neighbors are visited: for the same slice p only (j,k+1) and the (j+1,*)
   * row; for slice p+1 all nine neighbors. The weighting kernel
   * Spatial_Filter is assumed symmetric. */
  prior = 0;
  #pragma omp parallel for private(Diff, p, j, k, j_minus, k_minus, p_plus, j_plus, k_plus, cidx) reduction(+:prior)
  for (p = 0; p < ObjPtr->N_z; p++)
  for (j = 0; j < ObjPtr->N_y; j++)
  {
    for (k = 0; k < ObjPtr->N_x; k++)
    {
      /* which neighbors exist at this voxel */
      j_minus = (j - 1 >= 0)? true : false;
      k_minus = (k - 1 >= 0)? true : false;
      p_plus = (p + 1 < ObjPtr->N_z)? true : false;
      j_plus = (j + 1 < ObjPtr->N_y)? true : false;
      k_plus = (k + 1 < ObjPtr->N_x)? true : false;

      /* same slice, same row: (j, k+1) */
      if(k_plus == true)
      {
        for (cidx = 0; cidx < 3; cidx++){
          Diff = (ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p][j][k + 1][cidx]);
          prior += InpPtr->Spatial_Filter[1][1][2] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
        }
      }
      /* same slice, next row: (j+1, k-1), (j+1, k), (j+1, k+1) */
      if(j_plus == true)
      {
        if(k_minus == true)
        {
          for (cidx = 0; cidx < 3; cidx++){
            Diff = (ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p][j + 1][k - 1][cidx]);
            prior += InpPtr->Spatial_Filter[1][2][0] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
          }
        }
        for (cidx = 0; cidx < 3; cidx++){
          Diff = (ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p][j + 1][k][cidx]);
          prior += InpPtr->Spatial_Filter[1][2][1] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
        }
        if(k_plus == true)
        {
          for (cidx = 0; cidx < 3; cidx++){
            Diff = (ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p][j + 1][k + 1][cidx]);
            prior += InpPtr->Spatial_Filter[1][2][2] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
          }
        }
      }
      /* next slice: all nine (p+1, j+-1, k+-1) neighbors */
      if (p_plus == true)
      {
        if(j_minus == true)
        {
          for (cidx = 0; cidx < 3; cidx++){
            Diff = ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p + 1][j - 1][k][cidx];
            prior += InpPtr->Spatial_Filter[2][0][1] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
          }
        }
        for (cidx = 0; cidx < 3; cidx++){
          Diff = ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p+1][j][k][cidx];
          prior += InpPtr->Spatial_Filter[2][1][1] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
        }
        if(j_plus == true)
        {
          for (cidx = 0; cidx < 3; cidx++){
            Diff = ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p+1][j + 1][k][cidx];
            prior += InpPtr->Spatial_Filter[2][2][1] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
          }
        }
        if(j_minus == true)
        {
          if(k_minus == true)
          {
            for (cidx = 0; cidx < 3; cidx++){
              Diff = ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p + 1][j - 1][k - 1][cidx];
              prior += InpPtr->Spatial_Filter[2][0][0] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
            }
          }
          if(k_plus == true)
          {
            for (cidx = 0; cidx < 3; cidx++){
              Diff = ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p + 1][j - 1][k + 1][cidx];
              prior += InpPtr->Spatial_Filter[2][0][2] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
            }
          }
        }
        if(k_minus == true)
        {
          for (cidx = 0; cidx < 3; cidx++){
            Diff = ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p + 1][j][k - 1][cidx];
            prior += InpPtr->Spatial_Filter[2][1][0] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
          }
        }
        if(j_plus == true)
        {
          if(k_minus == true)
          {
            for (cidx = 0; cidx < 3; cidx++){
              Diff = ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p + 1][j + 1][k - 1][cidx];
              prior += InpPtr->Spatial_Filter[2][2][0] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
            }
          }
          if(k_plus == true)
          {
            for (cidx = 0; cidx < 3; cidx++){
              Diff = ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p + 1][j + 1][k + 1][cidx];
              prior += InpPtr->Spatial_Filter[2][2][2] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
            }
          }
        }
        if(k_plus == true)
        {
          for (cidx = 0; cidx < 3; cidx++){
            Diff = ObjPtr->Magnetization[p][j][k][cidx] - ObjPtr->Magnetization[p + 1][j][k + 1][cidx];
            prior += InpPtr->Spatial_Filter[2][1][2] * QGGMRF_Value(Diff,InpPtr->Mag_Sigma_Q[cidx], InpPtr->Mag_Sigma_Q_P[cidx], ObjPtr->Mag_C[cidx]);
          }
        }
      }
    }
  }

  check_info(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "Original Forward cost = %f\n",forward);
  check_info(InpPtr->node_rank==0,
InpPtr->debug_file_ptr, "Original Prior cost = %f\n",prior); cost = forward + prior; return cost; } /*randomly select the voxels lines which need to be updated along the x-y plane for each z-block and time slice*/ void randomly_select_x_y (ScannedObject* ObjPtr, TomoInputs* InpPtr) { int64_t j, num, n, Index, col, row, *Counter, ArraySize, block, xidx, yidx, zidx; ArraySize = ObjPtr->N_z*ObjPtr->N_y*ObjPtr->N_x; Counter = (int64_t*)get_spc(ArraySize, sizeof(int64_t)); for (Index = 0; Index < ArraySize; Index++) Counter[Index] = Index; InpPtr->UpdateSelectNum = 0; for (j=0; j<ObjPtr->N_z*ObjPtr->N_y*ObjPtr->N_x; j++){ Index = floor(random2() * ArraySize); Index = (Index == ArraySize)? ArraySize-1: Index; xidx = Counter[Index] % ObjPtr->N_x; yidx = (Counter[Index] / ObjPtr->N_x) % ObjPtr->N_y; zidx = (Counter[Index] / (ObjPtr->N_x*ObjPtr->N_y)); num = InpPtr->UpdateSelectNum; InpPtr->x_rand_select[num] = xidx; InpPtr->y_rand_select[num] = yidx; InpPtr->z_rand_select[num] = zidx; (InpPtr->UpdateSelectNum)++; Counter[Index] = Counter[ArraySize - 1]; ArraySize--; } if (InpPtr->UpdateSelectNum != ObjPtr->N_z*ObjPtr->N_y*ObjPtr->N_x) check_warn(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "Number of voxels to update does not equal the total number of voxels.\n"); free(Counter); } /*'initErrorSinogram' is used to initialize the error sinogram before start of ICD. It computes e = y - Ax - d. 
Ax is computed by forward projecting the object x.*/
/* NOTE(review): the function name is spelled 'initErrorSinogam' (missing 'r'); kept as-is since
   callers (ICD_BackProject below) use this spelling. */
int32_t initErrorSinogam (Sinogram* SinoPtr, ScannedObject* ObjPtr, TomoInputs* InpPtr, FFTStruct* fftptr)
{
	/* Local aliases for the error sinogram arrays (x and y tilt series, unflipped and flipped). */
	Real_arr_t*** ErrorSino_Unflip_x = SinoPtr->ErrorSino_Unflip_x;
	Real_arr_t*** ErrorSino_Unflip_y = SinoPtr->ErrorSino_Unflip_y;
	Real_arr_t*** ErrorSino_Flip_x = SinoPtr->ErrorSino_Flip_x;
	Real_arr_t*** ErrorSino_Flip_y = SinoPtr->ErrorSino_Flip_y;
	/* NOTE(review): flipavg appears in the OpenMP reduction clauses below but is never
	   accumulated; potrhoavg is unused - presumably leftovers from an earlier version. */
	Real_t unflipavg = 0, flipavg = 0, potxavg = 0, potyavg = 0, potzavg = 0, potrhoavg = 0, detdist_r;
	int32_t i, j, k, sino_idx, slice, flag = 0;
	AMatrixCol AMatrixPtr_X, AMatrixPtr_Y;
	/* Upper bound on the number of detector bins a single voxel can project onto. */
	uint8_t AvgNumXElements = (uint8_t)ceil(3*ObjPtr->delta_x/SinoPtr->delta_r);
	uint8_t AvgNumYElements = (uint8_t)ceil(3*ObjPtr->delta_y/SinoPtr->delta_r);
/*	char error_file[100];*/

	AMatrixPtr_X.values = (Real_t*)get_spc(AvgNumXElements, sizeof(Real_t));
	AMatrixPtr_X.index = (int32_t*)get_spc(AvgNumXElements, sizeof(int32_t));
	AMatrixPtr_Y.values = (Real_t*)get_spc(AvgNumYElements, sizeof(Real_t));
	AMatrixPtr_Y.index = (int32_t*)get_spc(AvgNumYElements, sizeof(int32_t));

	/* Zero the error sinograms and magnetic-potential buffers before forward projection.
	   NOTE(review): only the Unflip sinograms are zeroed here although the Flip arrays are
	   also written through mag_forward_project_voxel - confirm they are cleared elsewhere. */
	memset(&(ErrorSino_Unflip_x[0][0][0]), 0, SinoPtr->Nx_p*SinoPtr->N_t*SinoPtr->N_r*sizeof(Real_arr_t));
	memset(&(ErrorSino_Unflip_y[0][0][0]), 0, SinoPtr->Ny_p*SinoPtr->N_t*SinoPtr->N_r*sizeof(Real_arr_t));
	memset(&(ObjPtr->MagPotentials[0][0][0][0]), 0, ObjPtr->N_z*ObjPtr->N_y*ObjPtr->N_x*3*sizeof(Real_arr_t));
	memset(&(ObjPtr->ErrorPotMag[0][0][0][0]), 0, ObjPtr->N_z*ObjPtr->N_y*ObjPtr->N_x*3*sizeof(Real_arr_t));
	memset(&(ObjPtr->MagPotDual[0][0][0][0]), 0, ObjPtr->N_z*ObjPtr->N_y*ObjPtr->N_x*3*sizeof(Real_arr_t));

	/* Compute the magnetic vector potential from the magnetization (FFT-based convolution). */
	compute_magcrossprodtran (ObjPtr->Magnetization, ObjPtr->MagPotentials, ObjPtr->MagFilt, fftptr, ObjPtr->N_z, ObjPtr->N_y, ObjPtr->N_x, 1);

	for (i = 0; i < ObjPtr->N_z; i++)
	for (j = 0; j < ObjPtr->N_y; j++)
	for (k = 0; k < ObjPtr->N_x; k++)
	{
		/* Last-dimension components are indexed (z, y, x) here, per the accumulation below. */
		potzavg += ObjPtr->MagPotentials[i][j][k][0];
		potyavg += ObjPtr->MagPotentials[i][j][k][1];
		potxavg += ObjPtr->MagPotentials[i][j][k][2];
	}
	potzavg /= (ObjPtr->N_x*ObjPtr->N_y*ObjPtr->N_z);
	potyavg /= (ObjPtr->N_x*ObjPtr->N_y*ObjPtr->N_z);
	potxavg /= (ObjPtr->N_x*ObjPtr->N_y*ObjPtr->N_z);
	check_debug(InpPtr->node_rank == 0, InpPtr->debug_file_ptr, "Average of potentials after forward projection are (x, y, z) = (%f, %f, %f)\n", potxavg, potyavg, potzavg);

/*	#pragma omp parallel for private(j, k, sino_idx, slice)*/
	/* Forward project every voxel into both tilt series: accumulates Ax into the error arrays. */
	for (slice=0; slice<ObjPtr->N_z; slice++){
		for (j=0; j<ObjPtr->N_y; j++)
		{
			for (k=0; k<ObjPtr->N_x; k++){
				for (sino_idx=0; sino_idx < SinoPtr->Nx_p; sino_idx++){
					/* Distance of the voxel center along the detector r-axis for this x-tilt view. */
					detdist_r = (ObjPtr->y0 + ((Real_t)j+0.5)*ObjPtr->delta_y)*SinoPtr->cosine_x[sino_idx];
					detdist_r += -(ObjPtr->z0 + ((Real_t)slice+0.5)*ObjPtr->delta_z)*SinoPtr->sine_x[sino_idx];
					calcAMatrixColumnforAngle(SinoPtr, ObjPtr, SinoPtr->DetectorResponse_x[sino_idx], &(AMatrixPtr_X), detdist_r);
					mag_forward_project_voxel (SinoPtr, InpPtr, ObjPtr->MagPotentials[slice][j][k][0], ObjPtr->MagPotentials[slice][j][k][1], ErrorSino_Unflip_x, ErrorSino_Flip_x, &(AMatrixPtr_X), &(ObjPtr->VoxelLineResp_X[k]), sino_idx, SinoPtr->cosine_x[sino_idx], SinoPtr->sine_x[sino_idx]);
				}
				for (sino_idx=0; sino_idx < SinoPtr->Ny_p; sino_idx++){
					detdist_r = (ObjPtr->x0 + ((Real_t)k+0.5)*ObjPtr->delta_x)*SinoPtr->cosine_y[sino_idx];
					detdist_r += -(ObjPtr->z0 + ((Real_t)slice+0.5)*ObjPtr->delta_z)*SinoPtr->sine_y[sino_idx];
					calcAMatrixColumnforAngle(SinoPtr, ObjPtr, SinoPtr->DetectorResponse_y[sino_idx], &(AMatrixPtr_Y), detdist_r);
/*					printf("count = %d, idx = %d, val = %f\n", VoxelLineResponse[slice].count, VoxelLineResponse[slice].index[0], VoxelLineResponse[slice].values[0]);*/
					mag_forward_project_voxel (SinoPtr, InpPtr, ObjPtr->MagPotentials[slice][j][k][0], ObjPtr->MagPotentials[slice][j][k][2], ErrorSino_Unflip_y, ErrorSino_Flip_y, &(AMatrixPtr_Y), &(ObjPtr->VoxelLineResp_Y[j]), sino_idx, SinoPtr->cosine_y[sino_idx], SinoPtr->sine_y[sino_idx]);
				}
			}
		}
	}

	/* e = y - Ax: subtract the forward projection from the measured data (unflipped x series). */
	#pragma omp parallel for private(j, k) reduction(+:unflipavg,flipavg)
	for(i = 0; i < SinoPtr->Nx_p; i++)
	for(j = 0; j < SinoPtr->N_r; j++)
	for(k = 0; k < SinoPtr->N_t; k++)
	{
		unflipavg += ErrorSino_Unflip_x[i][j][k];
		ErrorSino_Unflip_x[i][j][k] = SinoPtr->Data_Unflip_x[i][j][k] - ErrorSino_Unflip_x[i][j][k];
	}
	/* Same for the unflipped y series. */
	#pragma omp parallel for private(j, k) reduction(+:unflipavg,flipavg)
	for(i = 0; i < SinoPtr->Ny_p; i++)
	for(j = 0; j < SinoPtr->N_r; j++)
	for(k = 0; k < SinoPtr->N_t; k++)
	{
		unflipavg += ErrorSino_Unflip_y[i][j][k];
		ErrorSino_Unflip_y[i][j][k] = SinoPtr->Data_Unflip_y[i][j][k] - ErrorSino_Unflip_y[i][j][k];
	}
	unflipavg = unflipavg/(SinoPtr->N_r*SinoPtr->N_t*(SinoPtr->Nx_p+SinoPtr->Ny_p));
	check_debug(InpPtr->node_rank == 0, InpPtr->debug_file_ptr, "Average of unflipped component of forward projection in node %d is %f\n", InpPtr->node_rank, unflipavg);

	free(AMatrixPtr_X.values);
	free(AMatrixPtr_X.index);
	free(AMatrixPtr_Y.values);
	free(AMatrixPtr_Y.index);

	return (flag);
}

/*Implements mutithreaded shared memory parallelization using OpenMP and splits work among threads. Each thread gets a certain time slice and z block to update. Multithreading is done within the z-blocks assigned to each node.
ErrorSino - Error sinogram
Iter - Present iteration number
MagUpdateMap - Magnitude update map containing the magnitude of update of each voxel
Mask - If a certain element is true then the corresponding voxel is updated*/
/* Returns 1 if the percentage average update magnitude fell below InpPtr->StopThreshold
   (convergence), 0 otherwise. */
int updateVoxelsTimeSlices(Sinogram* SinoPtr, ScannedObject* ObjPtr, TomoInputs* InpPtr, int32_t Iter, uint8_t** Mask)
{
	/* NOTE(review): the elecpot_* accumulators are declared but never reported. */
	Real_t /*AverageUpdate = 0, tempUpdate, avg_update_percentage, total_vox_mag = 0.0, vox_mag = 0.0, */magpot_update = 0, elecpot_update = 0, magpot_sum = 0, elecpot_sum = 0, magpot_update_tot = 0, elecpot_update_tot = 0, magpot_sum_tot = 0, elecpot_sum_tot = 0;
	int32_t xyz_start, xyz_end, j, K, block, idx;
/*	Real_t tempTotPix = 0, total_pix = 0;*/
/*	MPI_Request mag_send_reqs, mag_recv_reqs, elec_send_reqs, elec_recv_reqs;*/

	/* Generate a fresh random visit order over all voxels for this set of ICD passes. */
	randomly_select_x_y (ObjPtr, InpPtr);

/*	K = ObjPtr->N_time*ObjPtr->N_z*ObjPtr->N_y*ObjPtr->N_x;
	K = (K - total_zero_count)/(ObjPtr->gamma*K);*/
	K = ObjPtr->NHICD_Iterations;
	check_debug(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "Number of NHICD iterations is %d.\n", K);
	for (j = 0; j < K; j++)
	{
/*		total_vox_mag = 0.0;*/
/*#pragma omp parallel for private(block, idx, xy_start, xy_end) reduction(+:total_vox_mag)*/
		magpot_update = 0; magpot_sum = 0;
		/* Each pass updates a contiguous 1/K fraction of the random voxel list; the last pass
		   absorbs the remainder. (floor() is a no-op here since the division is integer.) */
		xyz_start = j*floor(InpPtr->UpdateSelectNum/K);
		xyz_end = (j + 1)*floor(InpPtr->UpdateSelectNum/K) - 1;
		xyz_end = (j == K - 1) ? InpPtr->UpdateSelectNum - 1: xyz_end;
/*		printf ("Loop 1 Start - j = %d, i = %d, idx = %d, z_start = %d, z_stop = %d, xy_start = %d, xy_end = %d\n", j, i, idx, z_start[i][idx], z_stop[i][idx], xy_start, xy_end);*/
		updateVoxels (xyz_start, xyz_end, InpPtr->x_rand_select, InpPtr->y_rand_select, InpPtr->z_rand_select, SinoPtr, ObjPtr, InpPtr, SinoPtr->ErrorSino_Unflip_x, SinoPtr->ErrorSino_Flip_x, SinoPtr->ErrorSino_Unflip_y, SinoPtr->ErrorSino_Flip_y, SinoPtr->DetectorResponse_x, SinoPtr->DetectorResponse_y, /*VoxelLineResponse,*/ Iter, ObjPtr->MagPotUpdateMap, &magpot_update, &magpot_sum, Mask);
		magpot_update_tot += magpot_update;
		magpot_sum_tot += magpot_sum;
/*		MPI_Send_Recv_Z_Slices (ObjPtr, InpPtr, &mag_send_reqs, &elec_send_reqs, &mag_recv_reqs, &elec_recv_reqs, 1);*/
/*		MPI_Wait_Z_Slices (ObjPtr, InpPtr, &mag_send_reqs, &elec_send_reqs, &mag_recv_reqs, &elec_recv_reqs, 1);*/

		/* Non-homogeneous ICD (NHICD): re-visit the voxel lines with the largest recent updates. */
		VSC_based_Voxel_Line_Select(ObjPtr, InpPtr, ObjPtr->MagPotUpdateMap);
		if (Iter > 1 && InpPtr->no_NHICD == 0)
		{
/*#pragma omp parallel for private(block, idx)*/
			updateVoxels (0, InpPtr->NHICDSelectNum-1, InpPtr->x_NHICD_select, InpPtr->y_NHICD_select, InpPtr->z_NHICD_select, SinoPtr, ObjPtr, InpPtr, SinoPtr->ErrorSino_Unflip_x, SinoPtr->ErrorSino_Flip_x, SinoPtr->ErrorSino_Unflip_y, SinoPtr->ErrorSino_Flip_y, SinoPtr->DetectorResponse_x, SinoPtr->DetectorResponse_y, Iter, ObjPtr->MagPotUpdateMap, &magpot_update, &magpot_sum, Mask);
		}
	}

/*	MPI_Allreduce(&AverageUpdate, &tempUpdate, 1, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD);
	MPI_Allreduce(&total_pix, &tempTotPix, 1, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD);
	MPI_Allreduce(&total_vox_mag, &vox_mag, 1, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD);
	AverageUpdate = tempUpdate/(tempTotPix);
	check_info(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "Average voxel update over all voxels is %e, total voxels is %e.\n", AverageUpdate, tempTotPix);
	*/
/*	multifree(offset_numerator,2);
	multifree(offset_denominator,2);*/
/*	avg_update_percentage =
100*tempUpdate/vox_mag;*/
	/* Convergence metric: percentage average magnitude of the voxel updates. */
	magpot_update = 100*magpot_update_tot/magpot_sum_tot;
	check_info(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "Percentage average magnitude of voxel updates of (Magnetic) potential is (%e).\n", magpot_update);
	if (magpot_update < InpPtr->StopThreshold)
	{
		check_info(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "Percentage average magnitude of voxel updates is less than convergence threshold.\n");
		return (1);
	}
	return(0);
}

/*ICD_BackProject calls the ICD optimization function repeatedly till the stopping criteria is met.
  Allocates all per-reconstruction scratch buffers (detector responses, error sinograms, masks),
  runs the outer (head) iterations with inner ICD sweeps, writes diagnostics to disk, and frees
  everything on exit. Returns 0 on success, -1 on error. */
int ICD_BackProject(Sinogram* SinoPtr, ScannedObject* ObjPtr, TomoInputs* InpPtr, FFTStruct* fftptr)
{
#ifndef NO_COST_CALCULATE
	Real_t cost, cost_0_iter, cost_last_iter, percentage_change_in_cost = 0, orig_cost_last = 0, orig_cost = 0;
	char costfile[100] = COST_FILENAME, origcostfile[100] = ORIG_COST_FILENAME;
#endif
	/* NOTE(review): DualElec and elec_primal_res are declared but the electrostatic branch is absent here. */
	Real_t x, y, DualMag[3], DualElec, mag_primal_res = 0, elec_primal_res = 0;
	int32_t t, i, j, flag = 0, Iter, k, HeadIter;
	int dimTiff[4];
	time_t start;
	char detect_file[100] = DETECTOR_RESPONSE_FILENAME;
/*	char MagPotUpdateMapFile[100] = MAGPOT_UPDATE_MAP_FILENAME;
	char ElecPotUpdateMapFile[100] = ELECPOT_UPDATE_MAP_FILENAME;*/
	uint8_t **Mask;

	/* Precompute the detector line response along the t (z) axis. */
	SinoPtr->ZLineResponse = (Real_arr_t *)get_spc(DETECTOR_RESPONSE_BINS + 1, sizeof(Real_arr_t));
	ZLineResponseProfile (SinoPtr, ObjPtr, InpPtr);

	/* Per-voxel-line detector responses along x and y. */
	ObjPtr->VoxelLineResp_X = (AMatrixCol*)get_spc(ObjPtr->N_x, sizeof(AMatrixCol));
	uint8_t AvgNumXElements = (uint8_t)((ObjPtr->delta_x/SinoPtr->delta_t) + 2);
	for (t = 0; t < ObjPtr->N_x; t++){
		ObjPtr->VoxelLineResp_X[t].values = (Real_t*)get_spc(AvgNumXElements, sizeof(Real_t));
		ObjPtr->VoxelLineResp_X[t].index = (int32_t*)get_spc(AvgNumXElements, sizeof(int32_t));
	}
	storeVoxelLineResponse(ObjPtr->VoxelLineResp_X, SinoPtr, ObjPtr->x0, ObjPtr->delta_x, ObjPtr->N_x);

	ObjPtr->VoxelLineResp_Y = (AMatrixCol*)get_spc(ObjPtr->N_y, sizeof(AMatrixCol));
	uint8_t AvgNumYElements = (uint8_t)((ObjPtr->delta_y/SinoPtr->delta_t) + 2);
	for (t = 0; t < ObjPtr->N_y; t++){
		ObjPtr->VoxelLineResp_Y[t].values = (Real_t*)get_spc(AvgNumYElements, sizeof(Real_t));
		ObjPtr->VoxelLineResp_Y[t].index = (int32_t*)get_spc(AvgNumYElements, sizeof(int32_t));
	}
	storeVoxelLineResponse(ObjPtr->VoxelLineResp_Y, SinoPtr, ObjPtr->y0, ObjPtr->delta_y, ObjPtr->N_y);

#ifdef POSITIVITY_CONSTRAINT
	check_debug(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "Enforcing positivity constraint\n");
#endif

/*	ObjPtr->MagPotUpdateMap = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, InpPtr->num_z_blocks, ObjPtr->N_y, ObjPtr->N_x);
	ObjPtr->ElecPotUpdateMap = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, InpPtr->num_z_blocks, ObjPtr->N_y, ObjPtr->N_x);*/
	SinoPtr->DetectorResponse_x = (Real_arr_t **)multialloc(sizeof(Real_arr_t), 2, SinoPtr->Nx_p, DETECTOR_RESPONSE_BINS + 1);
	SinoPtr->DetectorResponse_y = (Real_arr_t **)multialloc(sizeof(Real_arr_t), 2, SinoPtr->Ny_p, DETECTOR_RESPONSE_BINS + 1);
	SinoPtr->ErrorSino_Unflip_x = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, SinoPtr->Nx_p, SinoPtr->N_r, SinoPtr->N_t);
	SinoPtr->ErrorSino_Unflip_y = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, SinoPtr->Ny_p, SinoPtr->N_r, SinoPtr->N_t);
	ObjPtr->ErrorPotMag = (Real_arr_t****)multialloc(sizeof(Real_arr_t), 4, ObjPtr->N_z, ObjPtr->N_y, ObjPtr->N_x, 3);
	Mask = (uint8_t**)multialloc(sizeof(uint8_t), 2, ObjPtr->N_y, ObjPtr->N_x);
/*	memset(&(ObjPtr->MagPotUpdateMap[0][0][0]), 0, InpPtr->num_z_blocks*ObjPtr->N_y*ObjPtr->N_x*sizeof(Real_arr_t));
	memset(&(ObjPtr->ElecPotUpdateMap[0][0][0]), 0, InpPtr->num_z_blocks*ObjPtr->N_y*ObjPtr->N_x*sizeof(Real_arr_t));*/

/*	omp_set_num_threads(InpPtr->num_threads);*/
	check_info(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "Number of CPU cores is %d\n", (int)omp_get_num_procs());
/*	check_info(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "ICD_BackProject: Number of threads is %d\n", InpPtr->num_threads);*/

	/* Mask: update only voxels inside the cylinder of radius radius_obj around the rotation axis. */
	for (j = 0; j < ObjPtr->N_y; j++)
	for (k = 0; k < ObjPtr->N_x; k++){
		x = ObjPtr->x0 + ((Real_t)k + 0.5)*ObjPtr->delta_x;
		y = ObjPtr->y0 + ((Real_t)j + 0.5)*ObjPtr->delta_y;
		if (x*x + y*y < InpPtr->radius_obj*InpPtr->radius_obj)
			Mask[j][k] = 1;
		else
			Mask[j][k] = 0;
	}

	DetectorResponseProfile (SinoPtr, ObjPtr, InpPtr);

	dimTiff[0] = 1; dimTiff[1] = 1; dimTiff[2] = SinoPtr->Nx_p; dimTiff[3] = DETECTOR_RESPONSE_BINS+1;
	/* NOTE(review): passing detect_file as both destination and source of sprintf is undefined
	   behavior, and the second sprintf below appends the "_n%d" suffix a second time, so the
	   y-response file name becomes "..._nR_nR" - verify the intended file names. */
	sprintf(detect_file, "%s_n%d", detect_file, InpPtr->node_rank);
	if (InpPtr->Write2Tiff == 1)
		if (WriteMultiDimArray2Tiff (detect_file, dimTiff, 0, 1, 2, 3, &(SinoPtr->DetectorResponse_x[0][0]), 0, 0, 1, InpPtr->debug_file_ptr)) goto error;
	dimTiff[0] = 1; dimTiff[1] = 1; dimTiff[2] = SinoPtr->Ny_p; dimTiff[3] = DETECTOR_RESPONSE_BINS+1;
	sprintf(detect_file, "%s_n%d", detect_file, InpPtr->node_rank);
	if (InpPtr->Write2Tiff == 1)
		if (WriteMultiDimArray2Tiff (detect_file, dimTiff, 0, 1, 2, 3, &(SinoPtr->DetectorResponse_y[0][0]), 0, 0, 1, InpPtr->debug_file_ptr)) goto error;

	start = time(NULL);
	if (initObject(SinoPtr, ObjPtr, InpPtr)) goto error;
	if (initErrorSinogam(SinoPtr, ObjPtr, InpPtr, fftptr)) goto error;
/*	if (init_minmax_object (ObjPtr, InpPtr)) goto error;*/
	check_debug(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "Time taken to initialize object and compute error sinogram = %fmins\n", difftime(time(NULL),start)/60.0);

	start=time(NULL);
	orig_cost_last = compute_orig_cost(SinoPtr, ObjPtr, InpPtr, fftptr);
	check_info(InpPtr->node_rank == 0, InpPtr->debug_file_ptr, "HeadIter = 0: The original cost value is %f.\n", orig_cost_last);
	if (InpPtr->node_rank == 0)
		Write2Bin (origcostfile, 1, 1, 1, 1, sizeof(Real_t), &orig_cost_last, InpPtr->debug_file_ptr);

	/* Outer (head) loop: alternate magnetization reconstruction with ICD updates of the potentials. */
	for (HeadIter = 1; HeadIter <= InpPtr->Head_MaxIter; HeadIter++)
	{
		reconstruct_magnetization(ObjPtr, InpPtr, fftptr);

#ifndef NO_COST_CALCULATE
		cost = computeCost(SinoPtr,ObjPtr,InpPtr);
		cost_0_iter = cost;
		cost_last_iter = cost;
		check_info(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "------------- Iteration 0, Cost = %f------------\n",cost);
		if (InpPtr->node_rank == 0)
			Write2Bin (costfile, 1, 1, 1, 1, sizeof(Real_t), &cost, InpPtr->debug_file_ptr);
#endif /*Cost calculation endif*/
		/* Inner ICD loop over voxel update sweeps. */
		for (Iter = 1; Iter <= InpPtr->NumIter; Iter++)
		{
			flag = updateVoxelsTimeSlices (SinoPtr, ObjPtr, InpPtr, Iter, Mask);
			if (InpPtr->WritePerIter == 1)
				if (write_ObjectProjOff2TiffBinPerIter (SinoPtr, ObjPtr, InpPtr)) goto error;
#ifndef NO_COST_CALCULATE
			cost = computeCost(SinoPtr,ObjPtr,InpPtr);
			percentage_change_in_cost = ((cost - cost_last_iter)/(cost - cost_0_iter))*100.0;
			check_debug(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "Percentage change in cost is %f.\n", percentage_change_in_cost);
			check_info(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "------------- Iteration = %d, Cost = %f, Time since start of ICD = %fmins ------------\n",Iter,cost,difftime(time(NULL),start)/60.0);
			if (InpPtr->node_rank == 0)
				Append2Bin (costfile, 1, 1, 1, 1, sizeof(Real_t), &cost, InpPtr->debug_file_ptr);
			/* ICD is monotone in cost; an increase indicates a bug or numerical issue. */
			if (cost > cost_last_iter)
				check_info(InpPtr->node_rank == 0, InpPtr->debug_file_ptr, "ERROR: Cost value increased.\n");
			cost_last_iter = cost;
/*			if (percentage_change_in_cost < InpPtr->cost_thresh && flag != 0 && Iter > 1){*/
			if (flag != 0 && Iter > 1){
				check_info(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "Convergence criteria is met.\n");
				break;
			}
#else
			check_info(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "-------------ICD_BackProject: ICD Iter = %d, time since start of ICD = %fmins------------.\n",Iter,difftime(time(NULL),start)/60.0);
			if (flag != 0 && Iter > 1){
				check_info(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "Convergence criteria is met.\n");
				break;
			}
#endif
			flag = fflush(InpPtr->debug_file_ptr);
			if (flag != 0)
				check_warn(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "Cannot flush buffer.\n");
		}

		/* Dual update of the magnetic potential and primal residual accumulation. */
		mag_primal_res = 0; elec_primal_res = 0;
		for (i = 0; i < ObjPtr->N_z; i++)
		for (j = 0; j < ObjPtr->N_y; j++)
		for (k = 0; k < ObjPtr->N_x; k++)
		{
			DualMag[0] = ObjPtr->MagPotDual[i][j][k][0];
			DualMag[1] = ObjPtr->MagPotDual[i][j][k][1];
			DualMag[2] = ObjPtr->MagPotDual[i][j][k][2];
			ObjPtr->MagPotDual[i][j][k][0] = -ObjPtr->ErrorPotMag[i][j][k][0];
			ObjPtr->MagPotDual[i][j][k][1] = -ObjPtr->ErrorPotMag[i][j][k][1];
			ObjPtr->MagPotDual[i][j][k][2] = -ObjPtr->ErrorPotMag[i][j][k][2];
			ObjPtr->ErrorPotMag[i][j][k][0] -= (ObjPtr->MagPotDual[i][j][k][0] - DualMag[0]);
			ObjPtr->ErrorPotMag[i][j][k][1] -= (ObjPtr->MagPotDual[i][j][k][1] - DualMag[1]);
			ObjPtr->ErrorPotMag[i][j][k][2] -= (ObjPtr->MagPotDual[i][j][k][2] - DualMag[2]);
			mag_primal_res += fabs(ObjPtr->ErrorPotMag[i][j][k][0] + ObjPtr->MagPotDual[i][j][k][0]);
			mag_primal_res += fabs(ObjPtr->ErrorPotMag[i][j][k][1] + ObjPtr->MagPotDual[i][j][k][1]);
			mag_primal_res += fabs(ObjPtr->ErrorPotMag[i][j][k][2] + ObjPtr->MagPotDual[i][j][k][2]);
		}

		orig_cost = compute_orig_cost(SinoPtr, ObjPtr, InpPtr, fftptr);
		check_info(InpPtr->node_rank == 0, InpPtr->debug_file_ptr, "HeadIter = %d: The original cost value is %f. The decrease in original cost is %f.\n", HeadIter, orig_cost, orig_cost_last - orig_cost);
		mag_primal_res /= (ObjPtr->N_z*ObjPtr->N_y*ObjPtr->N_x);
		check_info(InpPtr->node_rank == 0, InpPtr->debug_file_ptr, "Mag average primal residual is %e.\n", mag_primal_res);
		if (InpPtr->node_rank == 0)
			Append2Bin (origcostfile, 1, 1, 1, 1, sizeof(Real_t), &orig_cost, InpPtr->debug_file_ptr);
		if (orig_cost > orig_cost_last)
			check_info(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "WARNING: Cost of original cost function increased!\n");
		orig_cost_last = orig_cost;
		if (write_ObjectProjOff2TiffBinPerIter (SinoPtr, ObjPtr, InpPtr)) goto error;
/*		if (avg_head_update < InpPtr->Head_threshold && HeadIter > 1)
			break;*/
	}

/*	int32_t size = InpPtr->num_z_blocks*ObjPtr->N_y*ObjPtr->N_x;
	if (write_SharedBinFile_At (MagPotUpdateMapFile, &(ObjPtr->MagPotUpdateMap[0][0][0]), InpPtr->node_rank*size, size, InpPtr->debug_file_ptr)) goto error;
	if (write_SharedBinFile_At (ElecPotUpdateMapFile, &(ObjPtr->ElecPotUpdateMap[0][0][0]), InpPtr->node_rank*size, size, InpPtr->debug_file_ptr)) goto error;*/
	if (InpPtr->Write2Tiff == 1)
	{
		dimTiff[0] = 1; dimTiff[1] = SinoPtr->Nx_p; dimTiff[2] = SinoPtr->N_r; dimTiff[3] = SinoPtr->N_t;
		if (WriteMultiDimArray2Tiff (ERRORSINO_UNFLIP_X_FILENAME, dimTiff, 0, 1, 2, 3, &(SinoPtr->ErrorSino_Unflip_x[0][0][0]), 0, 0, 1, InpPtr->debug_file_ptr)) goto error;
		dimTiff[0] = 1; dimTiff[1] = SinoPtr->Ny_p; dimTiff[2] = SinoPtr->N_r; dimTiff[3] = SinoPtr->N_t;
		if (WriteMultiDimArray2Tiff (ERRORSINO_UNFLIP_Y_FILENAME, dimTiff, 0, 1, 2, 3, &(SinoPtr->ErrorSino_Unflip_y[0][0][0]), 0, 0, 1, InpPtr->debug_file_ptr)) goto error;
	}

	/* Normal-path cleanup. */
	multifree(SinoPtr->ErrorSino_Unflip_x, 3);
	multifree(SinoPtr->ErrorSino_Unflip_y, 3);
	multifree(ObjPtr->ErrorPotMag, 4);
	multifree(SinoPtr->DetectorResponse_x, 2);
	multifree(SinoPtr->DetectorResponse_y, 2);
	multifree(Mask, 2);
	for (t = 0; t < ObjPtr->N_x; t++){
		free(ObjPtr->VoxelLineResp_X[t].values);
		free(ObjPtr->VoxelLineResp_X[t].index);
	}
	free(ObjPtr->VoxelLineResp_X);
	for (t = 0; t < ObjPtr->N_y; t++){
		free(ObjPtr->VoxelLineResp_Y[t].values);
		free(ObjPtr->VoxelLineResp_Y[t].index);
	}
	free(ObjPtr->VoxelLineResp_Y);
	free(SinoPtr->ZLineResponse);
/*	multifree(ObjPtr->MagPotUpdateMap, 3);
	multifree(ObjPtr->ElecPotUpdateMap, 3);*/

	check_debug(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "Finished running ICD_BackProject.\n");
	flag = fflush(InpPtr->debug_file_ptr);
	if (flag != 0)
		check_warn(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "Cannot flush buffer.\n");
	return(0);

error:
	/* NOTE(review): this error path frees ErrorSino_Flip_x, which is NOT allocated in this
	   function, and does not free ErrorSino_Unflip_y, ErrorPotMag, the VoxelLineResp buffers
	   or ZLineResponse - verify against the allocations above. */
	multifree(SinoPtr->ErrorSino_Unflip_x, 3);
	multifree(SinoPtr->ErrorSino_Flip_x, 3);
	multifree(SinoPtr->DetectorResponse_x,2);
	multifree(SinoPtr->DetectorResponse_y,2);
	multifree(Mask,2);
/*	multifree(ObjPtr->MagPotUpdateMap, 3);
	multifree(ObjPtr->ElecPotUpdateMap, 3);*/
	return(-1);
}
sdaxpy.c
/*
 * =======================================================================================
 *
 *      Author:   Jan Eitzinger (je), jan.eitzinger@fau.de
 *      Copyright (c) 2020 RRZE, University Erlangen-Nuremberg
 *
 *      Permission is hereby granted, free of charge, to any person obtaining a copy
 *      of this software and associated documentation files (the "Software"), to deal
 *      in the Software without restriction, including without limitation the rights
 *      to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 *      copies of the Software, and to permit persons to whom the Software is
 *      furnished to do so, subject to the following conditions:
 *
 *      The above copyright notice and this permission notice shall be included in all
 *      copies or substantial portions of the Software.
 *
 *      THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 *      IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 *      FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 *      AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 *      LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 *      OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 *      SOFTWARE.
 *
 * =======================================================================================
 */
#include <timing.h>

/* Streaming "sdaxpy" benchmark kernel: a[i] += b[i] * c[i] for i in [0, N).
 * The loop is split statically across OpenMP threads; the restrict qualifiers
 * promise the three arrays do not alias each other. Returns the elapsed
 * wall-clock time in seconds spent inside the kernel. */
double sdaxpy(
        double * restrict a,
        double * restrict b,
        double * restrict c,
        int N
        )
{
    double tic = getTimeStamp();

#pragma omp parallel for schedule(static)
    for (int idx = 0; idx < N; idx++) {
        a[idx] += b[idx] * c[idx];
    }

    double toc = getTimeStamp();
    return toc - tic;
}
MultiDimensionalArray.h
#pragma once

#include <assert.h>
#include <array>
#include <vector>
#include <functional>
#include <memory>
#include <iomanip>
#include <numeric>
#include <execution>
#include <iostream>

namespace Iyathuum {
  /* Layout of the linearized storage: Normal = first index varies fastest,
     Reversed = last index varies fastest. */
  enum memoryLayout {
    Reversed,
    Normal
  };

  template <typename Type, size_t Dimension, memoryLayout Layout = Normal, class = std::make_index_sequence<Dimension>>
  class MultiDimensionalArray;

  // Just takes care of the data storage. Should not contain any comfort features;
  // those belong in helper classes.
  template <typename Type, size_t Dimension, memoryLayout Layout, size_t... Is>
  class MultiDimensionalArray<Type, Dimension, Layout, std::index_sequence<Is...>> {
  private:
    // Maps every element of the index pack to size_t so the variadic
    // constructor/accessors take exactly `Dimension` size_t arguments.
    template <size_t>
    using input_ = size_t;

  public:
    /// Construct from `Dimension` extents given as separate arguments.
    MultiDimensionalArray(input_<Is>... vals) {
      _size = 1;
      size_t counter = 0;
      for (auto&& extent : { vals... }) {
        _dimension[counter++] = extent;
        _size *= extent;
      }
      _data.resize(getSize());
      assert(getSize() != 0);
    }

    /// Construct from an array of extents.
    MultiDimensionalArray(std::array<size_t, Dimension> dim) {
      _size = 1;
      for (size_t i = 0; i < Dimension; i++) {
        _dimension[i] = dim[i];
        _size *= dim[i];
      }
      _data.resize(getSize());
      assert(getSize() != 0);
    }

    /// Construct from a vector of extents; must hold exactly `Dimension` entries.
    MultiDimensionalArray(std::vector<size_t> dim) {
      assert(dim.size() == Dimension);
      _size = 1;
      for (size_t i = 0; i < dim.size(); i++) {
        _dimension[i] = dim[i];
        _size *= dim[i];
      }
      _data.resize(getSize());
      assert(getSize() != 0);
    }

    /// Copy constructor.
    /// BUGFIX: the previous version memcpy'd using `sizeof _data` (the size of the
    /// std::vector object, not of an element) and multiplied `sizeof _dimension` by
    /// Dimension again, overflowing both destinations; memcpy is also undefined
    /// behavior for non-trivially-copyable Type. Element-wise copies are used instead.
    MultiDimensionalArray(const MultiDimensionalArray<Type, Dimension>& copy) {
      _size = copy._size;
      for (size_t i = 0; i < Dimension; i++)
        _dimension[i] = copy._dimension[i];
      _data = copy._data;
      assert(getSize() != 0);
    }

    virtual ~MultiDimensionalArray() {}

    /// Mutable access by `Dimension` separate indices.
    Type& getRef(input_<Is>... vals) { return _data[transform(vals...)]; }
    /// Read access by `Dimension` separate indices.
    Type getVal(input_<Is>... vals) const { return _data[transform(vals...)]; }

    /// Mutable access by coordinate array.
    Type& getRef(std::array<size_t, Dimension> vals) { return _data[transformA(vals)]; }
    /// Read access by coordinate array.
    Type getVal(std::array<size_t, Dimension> vals) { return _data[transformA(vals)]; }

    /// Access by linear (already transformed) position.
    Type& get_linearRef(size_t pos) { return _data[pos]; }
    Type get_linearVal(size_t pos) const { return _data[pos]; }
    void set_linear(size_t pos, const Type val) { _data[pos] = val; }

    /// Assign `t` to every element (parallelized through apply()).
    void fill(Type t) {
      apply([t](size_t, Type& element) { element = t; });
    }

    /// Linearize `Dimension` separate indices according to Layout.
    size_t transform(input_<Is>... vals) const {
      return transformA({ vals... });
    }

    /// Linearize a coordinate array according to Layout.
    size_t transformA(std::array<size_t, Dimension> coord) const {
      size_t result = 0;
      size_t stride = 1;
      if constexpr (Layout == Reversed) {
        for (int i = (int)Dimension - 1; i >= 0; i--) {
          result += coord[i] * stride;
          stride *= _dimension[i];
        }
      } else {
        for (size_t i = 0; i < Dimension; i++) {
          result += coord[i] * stride;
          stride *= _dimension[i];
        }
      }
      return result;
    }

    /// Inverse of transformA: split a linear position into per-dimension coordinates.
    /// BUGFIX: the Reversed branch previously looped `for (size_t i = Dimension - 1; i >= 0; i--)`,
    /// which never terminates because an unsigned index is always >= 0.
    std::array<size_t, Dimension> transformToCoordiante(size_t input) const {
      size_t pos = input;
      std::array<size_t, Dimension> result;
      if constexpr (Layout == Reversed) {
        for (size_t n = Dimension; n-- > 0; ) {
          result[n] = pos % _dimension[n];
          pos /= _dimension[n];
        }
      } else {
        for (size_t i = 0; i < Dimension; i++) {
          result[i] = pos % _dimension[i];
          pos /= _dimension[i];
        }
      }
      return result;
    }

    /// Total number of elements.
    size_t getSize() const { return _size; }

    /// All extents as a vector.
    std::vector<size_t> getDimensionVector() const {
      std::vector<size_t> result;
      for (size_t i = 0; i < Dimension; i++)
        result.push_back(getDimension(i));
      return result;
    }

    /// Extent of a single dimension.
    size_t getDimension(size_t dimension) const {
      assert(dimension < Dimension);
      return _dimension[dimension];
    }

    /// Element-wise transformation into a new array of the same shape.
    template <typename Type2>
    std::shared_ptr<MultiDimensionalArray<Type2, Dimension>> map(std::function<Type2(Type const&)> func) {
      auto result = std::make_shared<MultiDimensionalArray<Type2, Dimension>>(getDimensionVector());
      const int chunkSize = 1000;
      const int chunks = (int)(getSize() / chunkSize) + 1;
#pragma omp parallel for
      for (int chunk = 0; chunk < chunks; chunk++) {
        size_t lo = (size_t)chunk * chunkSize;
        size_t hi = lo + chunkSize;
        for (size_t i = lo; i < hi && i < getSize(); i++)
          result->set_linear(i, func(get_linearVal(i)));
      }
      return result;
    }

    /// Apply func(linearIndex, element&) to every element, chunked across OpenMP threads.
    void apply(std::function<void(size_t, Type&)> func) {
      const int chunkSize = 1000;
      const int chunks = (int)(getSize() / chunkSize) + 1;
#pragma omp parallel for
      for (int chunk = 0; chunk < chunks; chunk++) {
        size_t lo = (size_t)chunk * chunkSize;
        size_t hi = lo + chunkSize;
        if (hi > getSize()) hi = getSize();
        for (size_t i = lo; i < hi; i++)
          func(i, get_linearRef(i));
      }
    }

    /// Apply func(coordinate, element&) to every element.
    void apply(std::function<void(const std::array<size_t, Dimension>&, Type&)> func) {
      const int chunkSize = 1000;
      const int chunks = (int)(getSize() / chunkSize) + 1;
#pragma omp parallel for
      for (int chunk = 0; chunk < chunks; chunk++) {
        size_t lo = (size_t)chunk * chunkSize;
        size_t hi = lo + chunkSize;
        for (size_t i = lo; i < hi && i < getSize(); i++)
          func(transformToCoordiante(i), get_linearRef(i));
      }
    }

    /// Apply func on the axis-aligned sub-box [start, start+size), clipped to the
    /// array bounds; parallelized over the fastest-varying dimension.
    /// BUGFIX 1: the clip previously used `>=` and subtracted one extra element, so a box
    /// that exactly fit the array silently lost its last row/column.
    /// BUGFIX 2: the recursion previously re-iterated the start dimension that the parallel
    /// loop had already fixed, visiting elements multiple times and stepping outside the box.
    void applySubset(std::array<size_t, Dimension> start, std::array<size_t, Dimension> size, std::function<void(std::array<size_t, Dimension>, Type&)> func) {
      for (size_t i = 0; i < Dimension; i++) {
        if (start[i] >= getDimension(i))
          size[i] = 0;  // box starts outside the array in this dimension
        else if (start[i] + size[i] > getDimension(i))
          size[i] = getDimension(i) - start[i];
      }
      size_t startDimension = (Layout == Reversed) ? Dimension - 1 : 0;
      size_t xSize = size[startDimension];
#pragma omp parallel for
      for (int64_t x = 0; x < (int64_t)xSize; x++) {
        std::array<size_t, Dimension> currentPosition = start;
        currentPosition[startDimension] = start[startDimension] + x;
        if constexpr (Dimension == 1) {
          func(currentPosition, get_linearRef(transformA(currentPosition)));
        } else {
          // The start dimension is fixed by this thread; recurse over the rest.
          size_t next = (Layout == Reversed) ? startDimension - 1 : startDimension + 1;
          apply_recursive(start, size, func, currentPosition, next);
        }
      }
    }

  private:
    /// Depth-first walk over the remaining dimensions of the sub-box.
    void apply_recursive(const std::array<size_t, Dimension> start, const std::array<size_t, Dimension> size, std::function<void(std::array<size_t, Dimension>, Type&)> func, std::array<size_t, Dimension> currentPosition, size_t currentDimension) {
      const bool innermost = (Layout == Reversed && currentDimension == 0) ||
                             (Layout == Normal && currentDimension == Dimension - 1);
      for (int64_t i = 0; i < (int64_t)size[currentDimension]; i++) {
        std::array<size_t, Dimension> pos = currentPosition;
        pos[currentDimension] += i;
        if (innermost) {
          func(pos, get_linearRef(transformA(pos)));
        } else if constexpr (Layout == Reversed) {
          apply_recursive(start, size, func, pos, currentDimension - 1);
        } else {
          apply_recursive(start, size, func, pos, currentDimension + 1);
        }
      }
    }

  public:
    /// Parallel reduction over all elements.
    /// BUGFIX: the result of std::reduce was previously discarded (missing return),
    /// making the return value of this function undefined behavior.
    Type reduce(Type startVal, std::function<Type(const Type, const Type)> binOp) {
      return std::reduce(std::execution::par, _data.begin(), _data.end(), startVal, binOp);
    }

    /// Print a 2D array as an aligned table (debug helper; asserts Dimension == 2).
    void write() {
      const int numWidth = 14;
      const char separator = ' ';
      assert(Dimension == 2);
      for (size_t y = 0; y < getDimension(1); y++) {
        for (size_t x = 0; x < getDimension(0); x++)
          std::cout << std::left << std::setw(numWidth) << std::setfill(separator) << getVal(x, y);
        std::cout << std::endl;
      }
    }

    /// Raw pointer to the linearized storage.
    Type* data() { return _data.data(); }
    /// Underlying storage vector.
    std::vector<Type>& vector() { return _data; }

  private:
    std::vector<Type> _data;       // linearized element storage
    size_t _dimension[Dimension];  // extent of each dimension
    size_t _size;                  // cached product of all extents

    Type& get(size_t position) {
      assert(position <= getSize());
      return _data[position];
    }
  };
}
ark_heat1D_adapt_ompdev.c
/*--------------------------------------------------------------- * Programmer(s): Shelby Lockhart @ LLNL *--------------------------------------------------------------- * Based on the serial code ark_heat1D_adapt.c developed * by Daniel R. Reynolds and parallelized with OpenMP 4.5 *--------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End *--------------------------------------------------------------- * Example problem: * * The following test simulates a simple 1D heat equation, * u_t = k*u_xx + f * for t in [0, 10], x in [0, 1], with initial conditions * u(0,x) = 0 * Dirichlet boundary conditions, i.e. * u_t(t,0) = u_t(t,1) = 0, * and a heating term of the form * f = 2*exp(-200*(x-0.25)*(x-0.25)) * - exp(-400*(x-0.7)*(x-0.7)) * + exp(-500*(x-0.4)*(x-0.4)) * - 2*exp(-600*(x-0.55)*(x-0.55)); * * The spatial derivatives are computed using a three-point * centered stencil (second order for a uniform mesh). The data * is initially uniformly distributed over N points in the interval * [0, 1], but as the simulation proceeds the mesh is adapted. * * This program solves the problem with a DIRK method, solved with * a Newton iteration and SUNLinSol_PCG linear solver, with a * user-supplied Jacobian-vector product routine. *---------------------------------------------------------------*/ /* Header files */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <arkode/arkode_arkstep.h> /* prototypes for ARKStep fcts., consts */ #include <nvector/nvector_openmpdev.h> /* OpenMPDEV N_Vector types, fcts., macros */ #include <sunlinsol/sunlinsol_pcg.h> /* access to PCG SUNLinearSolver */ #include <sundials/sundials_types.h> /* defs. 
of realtype, sunindextype, etc */ #include <sundials/sundials_math.h> /* def. of SUNRsqrt, etc. */ #ifdef _OPENMP #include <omp.h> /* OpenMP functions */ #endif #if defined(SUNDIALS_EXTENDED_PRECISION) #define GSYM "Lg" #define ESYM "Le" #define FSYM "Lf" #else #define GSYM "g" #define ESYM "e" #define FSYM "f" #endif /* constants */ #define ZERO RCONST(0.0) #define PT25 RCONST(0.25) #define PT4 RCONST(0.4) #define PT5 RCONST(0.5) #define PT55 RCONST(0.55) #define PT7 RCONST(0.7) #define ONE RCONST(1.0) #define TWO RCONST(2.0) #define TWOHUNDRED RCONST(200.0) #define FOURHUNDRED RCONST(400.0) #define FIVEHUNDRED RCONST(500.0) #define SIXHUNDRED RCONST(600.0) /* user data structure */ typedef struct { sunindextype N; /* current number of intervals */ realtype *x_host; /* current mesh on host */ realtype *x_dev; /* current mesh on device */ realtype k; /* diffusion coefficient */ realtype refine_tol; /* adaptivity tolerance */ } *UserData; /* User-supplied Functions Called by the Solver */ static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data); static int Jac(N_Vector v, N_Vector Jv, realtype t, N_Vector y, N_Vector fy, void *user_data, N_Vector tmp); /* Private function to check function return values */ realtype * adapt_mesh(N_Vector y, sunindextype *Nnew, UserData udata); static int project(sunindextype Nold, realtype *xold, N_Vector yold, sunindextype Nnew, realtype *xnew, N_Vector ynew); static int check_flag(void *flagvalue, const char *funcname, int opt); /* Main Program */ int main() { /* general problem parameters */ realtype T0 = RCONST(0.0); /* initial time */ realtype Tf = RCONST(1.0); /* final time */ realtype rtol = RCONST(1.e-3); /* relative tolerance */ realtype atol = RCONST(1.e-10); /* absolute tolerance */ realtype hscale = RCONST(1.0); /* time step change factor on resizes */ UserData udata = NULL; realtype *data; sunindextype N = 21; /* initial spatial mesh size */ realtype refine = RCONST(3.0e-3); /* adaptivity refinement tolerance 
*/ realtype k = RCONST(0.5); /* heat conductivity */ sunindextype i; long int nni, nni_tot=0, nli, nli_tot=0; int iout=0; /* general problem variables */ int flag; /* reusable error-checking flag */ N_Vector y = NULL; /* empty vector for storing solution */ N_Vector y2 = NULL; /* empty vector for storing solution */ N_Vector yt = NULL; /* empty vector for swapping */ SUNLinearSolver LS = NULL; /* empty linear solver object */ void *arkode_mem = NULL; /* empty ARKode memory structure */ FILE *XFID, *UFID; realtype t, olddt, newdt; realtype *xnew_host = NULL; realtype *xnew_dev = NULL; sunindextype Nnew; int dev, host; /* Create the SUNDIALS context object for this simulation */ SUNContext ctx; flag = SUNContext_Create(NULL, &ctx); if (check_flag(&flag, "SUNContext_Create", 1)) return 1; /* get host and offloading device */ dev = omp_get_default_device(); host = omp_get_initial_device(); /* allocate and fill initial udata structure */ udata = (UserData) malloc(sizeof(*udata)); udata->N = N; udata->k = k; udata->refine_tol = refine; udata->x_host = malloc(N * sizeof(realtype)); for (i=0; i<N; i++) udata->x_host[i] = ONE*i/(N-1); udata->x_dev = omp_target_alloc(N * sizeof(realtype), dev); omp_target_memcpy(udata->x_dev, udata->x_host, N * sizeof(realtype), 0, 0, dev, host); /* Initial problem output */ printf("\n1D adaptive Heat PDE test problem:\n"); printf(" diffusion coefficient: k = %"GSYM"\n", udata->k); printf(" initial N = %li\n", (long int) udata->N); /* Initialize data structures */ y = N_VNew_OpenMPDEV(N, ctx); /* Create initial OpenMPDEV vector for solution */ if (check_flag((void *) y, "N_VNew_OpenMPDEV", 0)) return 1; N_VConst(ZERO, y); /* Set initial conditions */ /* output mesh to disk */ XFID=fopen("heat_mesh.txt","w"); /* output initial mesh to disk */ for (i=0; i<udata->N; i++) fprintf(XFID," %.16"ESYM, udata->x_host[i]); fprintf(XFID,"\n"); /* Open output stream for results, access data array */ UFID=fopen("heat1D.txt","w"); /* output initial 
condition to disk */ N_VCopyFromDevice_OpenMPDEV(y); data = N_VGetHostArrayPointer_OpenMPDEV(y); for (i=0; i<udata->N; i++) fprintf(UFID," %.16"ESYM, data[i]); fprintf(UFID,"\n"); /* Initialize the ARK timestepper */ arkode_mem = ARKStepCreate(NULL, f, T0, y, ctx); if (check_flag((void *) arkode_mem, "ARKStepCreate", 0)) return 1; /* Set routines */ flag = ARKStepSetUserData(arkode_mem, (void *) udata); /* Pass udata to user functions */ if (check_flag(&flag, "ARKStepSetUserData", 1)) return 1; flag = ARKStepSetMaxNumSteps(arkode_mem, 10000); /* Increase max num steps */ if (check_flag(&flag, "ARKStepSetMaxNumSteps", 1)) return 1; flag = ARKStepSStolerances(arkode_mem, rtol, atol); /* Specify tolerances */ if (check_flag(&flag, "ARKStepSStolerances", 1)) return 1; flag = ARKStepSetAdaptivityMethod(arkode_mem, 2, 1, 0, NULL); /* Set adaptivity method */ if (check_flag(&flag, "ARKStepSetAdaptivityMethod", 1)) return 1; flag = ARKStepSetPredictorMethod(arkode_mem, 0); /* Set predictor method */ if (check_flag(&flag, "ARKStepSetPredictorMethod", 1)) return 1; /* Specify linearly implicit RHS, with time-dependent Jacobian */ flag = ARKStepSetLinear(arkode_mem, 1); if (check_flag(&flag, "ARKStepSetLinear", 1)) return 1; /* Initialize PCG solver -- no preconditioning, with up to N iterations */ LS = SUNLinSol_PCG(y, 0, (int) N, ctx); if (check_flag((void *)LS, "SUNLinSol_PCG", 0)) return 1; /* Linear solver interface -- set user-supplied J*v routine (no 'jtsetup' required) */ flag = ARKStepSetLinearSolver(arkode_mem, LS, NULL); /* Attach linear solver to ARKStep */ if (check_flag(&flag, "ARKStepSetLinearSolver", 1)) return 1; flag = ARKStepSetJacTimes(arkode_mem, NULL, Jac); /* Set the Jacobian routine */ if (check_flag(&flag, "ARKStepSetJacTimes", 1)) return 1; /* Main time-stepping loop: calls ARKStepEvolve to perform the integration, then prints results. 
Stops when the final time has been reached */ t = T0; olddt = ZERO; newdt = ZERO; printf(" iout dt_old dt_new ||u||_rms N NNI NLI\n"); printf(" ----------------------------------------------------------------------------------------\n"); printf(" %4i %19.15"ESYM" %19.15"ESYM" %19.15"ESYM" %li %2i %3i\n", iout, olddt, newdt, SUNRsqrt(N_VDotProd(y,y)/udata->N), (long int) udata->N, 0, 0); while (t < Tf) { /* "set" routines */ flag = ARKStepSetStopTime(arkode_mem, Tf); if (check_flag(&flag, "ARKStepSetStopTime", 1)) return 1; flag = ARKStepSetInitStep(arkode_mem, newdt); if (check_flag(&flag, "ARKStepSetInitStep", 1)) return 1; /* call integrator */ flag = ARKStepEvolve(arkode_mem, Tf, y, &t, ARK_ONE_STEP); if (check_flag(&flag, "ARKStepEvolve", 1)) return 1; /* "get" routines */ flag = ARKStepGetLastStep(arkode_mem, &olddt); if (check_flag(&flag, "ARKStepGetLastStep", 1)) return 1; flag = ARKStepGetCurrentStep(arkode_mem, &newdt); if (check_flag(&flag, "ARKStepGetCurrentStep", 1)) return 1; flag = ARKStepGetNumNonlinSolvIters(arkode_mem, &nni); if (check_flag(&flag, "ARKStepGetNumNonlinSolvIters", 1)) return 1; flag = ARKStepGetNumLinIters(arkode_mem, &nli); if (check_flag(&flag, "ARKStepGetNumLinIters", 1)) return 1; /* print current solution stats */ iout++; printf(" %4i %19.15"ESYM" %19.15"ESYM" %19.15"ESYM" %li %2li %3li\n", iout, olddt, newdt, SUNRsqrt(N_VDotProd(y,y)/udata->N), (long int) udata->N, nni, nli); nni_tot += nni; nli_tot += nli; /* output results and current mesh to disk */ N_VCopyFromDevice_OpenMPDEV(y); data = N_VGetHostArrayPointer_OpenMPDEV(y); for (i=0; i<udata->N; i++) fprintf(UFID," %.16"ESYM, data[i]); fprintf(UFID,"\n"); for (i=0; i<udata->N; i++) fprintf(XFID," %.16"ESYM, udata->x_host[i]); fprintf(XFID,"\n"); /* adapt the spatial mesh */ xnew_host = adapt_mesh(y, &Nnew, udata); if (check_flag(xnew_host, "ark_adapt", 0)) return 1; /* create N_Vector of new length */ y2 = N_VNew_OpenMPDEV(Nnew, ctx); if (check_flag((void *) y2, 
"N_VNew_OpenMPDEV", 0)) return 1; /* copy new mesh from host array to device array */ xnew_dev = omp_target_alloc(Nnew * sizeof(realtype), dev); omp_target_memcpy(xnew_dev, xnew_host, Nnew*sizeof(realtype), 0, 0, dev, host); /* project solution onto new mesh */ flag = project(udata->N, udata->x_dev, y, Nnew, xnew_dev, y2); if (check_flag(&flag, "project", 1)) return 1; /* delete old vector, old mesh */ N_VDestroy(y); free(udata->x_host); omp_target_free(udata->x_dev, dev); /* swap x and xnew so that new mesh is stored in udata structure */ udata->x_host = xnew_host; xnew_host = NULL; udata->N = Nnew; /* store size of new mesh */ udata->x_dev = xnew_dev; xnew_dev = NULL; /* swap y and y2 so that y holds new solution */ yt = y; y = y2; y2 = yt; /* call ARKStepResize to notify integrator of change in mesh */ flag = ARKStepResize(arkode_mem, y, hscale, t, NULL, NULL); if (check_flag(&flag, "ARKStepResize", 1)) return 1; /* destroy and re-allocate linear solver memory; reattach to ARKStep interface */ SUNLinSolFree(LS); LS = SUNLinSol_PCG(y, 0, (int) N, ctx); if (check_flag((void *)LS, "SUNLinSol_PCG", 0)) return 1; flag = ARKStepSetLinearSolver(arkode_mem, LS, NULL); if (check_flag(&flag, "ARKStepSetLinearSolver", 1)) return 1; flag = ARKStepSetJacTimes(arkode_mem, NULL, Jac); if (check_flag(&flag, "ARKStepSetJacTimes", 1)) return 1; } printf(" ----------------------------------------------------------------------------------------\n"); /* print some final statistics */ printf(" Final solver statistics:\n"); printf(" Total number of time steps = %i\n", iout); printf(" Total nonlinear iterations = %li\n", nni_tot); printf(" Total linear iterations = %li\n\n", nli_tot); /* Clean up and return with successful completion */ fclose(UFID); fclose(XFID); N_VDestroy(y); /* Free vectors */ free(udata->x_host); /* Free user data */ omp_target_free(udata->x_dev, dev); free(udata); ARKStepFree(&arkode_mem); /* Free integrator memory */ SUNLinSolFree(LS); /* Free linear solver */ 
SUNContext_Free(&ctx); /* Free context */ return 0; } /*-------------------------------- * Functions called by the solver *--------------------------------*/ /* f routine to compute the ODE RHS function f(t,y). */ static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data) { UserData udata = (UserData) user_data; /* access problem data */ sunindextype N = udata->N; /* set variable shortcuts */ realtype k = udata->k; realtype *x = udata->x_dev; realtype *Y=NULL, *Ydot=NULL; realtype dxL, dxR; sunindextype i; int dev; dev = omp_get_default_device(); /* access data arrays */ Y = N_VGetDeviceArrayPointer_OpenMPDEV(y); if (check_flag((void *) Y, "N_VGetDeviceArrayPointer", 0)) return 1; Ydot = N_VGetDeviceArrayPointer_OpenMPDEV(ydot); if (check_flag((void *) Ydot, "N_VGetDeviceArrayPointer", 0)) return 1; /* Initialize ydot to zero - also handles boundary conditions */ N_VConst(ZERO, ydot); /* iterate over domain interior, computing all equations */ #pragma omp target map(to:N) is_device_ptr(x, Ydot, Y) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (i=1; i<N-1; i++) { /* interior */ dxL = x[i]-x[i-1]; dxR = x[i+1]-x[i]; Ydot[i] = Y[i-1]*k*TWO/(dxL*(dxL+dxR)) - Y[i]*k*TWO/(dxL*dxR) + Y[i+1]*k*TWO/(dxR*(dxL+dxR)) + TWO*SUNRexp(-TWOHUNDRED*(x[i]-PT25)*(x[i]-PT25)) /* source term */ - SUNRexp(-FOURHUNDRED*(x[i]-PT7)*(x[i]-PT7)) + SUNRexp(-FIVEHUNDRED*(x[i]-PT4)*(x[i]-PT4)) - TWO*SUNRexp(-SIXHUNDRED*(x[i]-PT55)*(x[i]-PT55)); } return 0; /* Return with success */ } /* Jacobian routine to compute J(t,y) = df/dy. 
*/
/* Computes the Jacobian-vector product Jv = (df/dy)*v for the linear
   diffusion operator; boundary entries stay zero.  fy and tmp are unused. */
static int Jac(N_Vector v, N_Vector Jv, realtype t, N_Vector y,
               N_Vector fy, void *user_data, N_Vector tmp)
{
  UserData udata = (UserData) user_data;  /* variable shortcuts */
  sunindextype N = udata->N;
  realtype k = udata->k;
  realtype *x = udata->x_dev;
  realtype *V=NULL, *JV=NULL;
  sunindextype i;
  int dev;

  dev = omp_get_default_device();

  /* access data arrays */
  V = N_VGetDeviceArrayPointer_OpenMPDEV(v);
  if (check_flag((void *) V, "N_VGetDeviceArrayPointer", 0)) return 1;
  JV = N_VGetDeviceArrayPointer_OpenMPDEV(Jv);
  if (check_flag((void *) JV, "N_VGetDeviceArrayPointer", 0)) return 1;

  /* initialize Jv product to zero - also handles boundary conditions */
  N_VConst(ZERO, Jv);

  /* iterate over domain, computing all Jacobian-vector products.
     FIX: dxL/dxR are declared inside the loop body so each thread gets a
     private copy; at function scope they would be shared across the
     parallel loop -- a data race. */
#pragma omp target map(to:N) is_device_ptr(x, JV, V) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i=1; i<N-1; i++) {
    realtype dxL = x[i]-x[i-1];
    realtype dxR = x[i+1]-x[i];
    JV[i] = V[i-1]*k*TWO/(dxL*(dxL+dxR))
          - V[i]*k*TWO/(dxL*dxR)
          + V[i+1]*k*TWO/(dxR*(dxL+dxR));
  }

  return 0;     /* Return with success */
}

/*-------------------------------
 * Private helper functions
 *-------------------------------*/

/* Adapts the current mesh, using a simple adaptivity strategy of
   refining when an approximation of the scaled second-derivative is
   too large.  We only do this in one sweep, so no attempt is made to
   ensure the resulting mesh meets these same criteria after adaptivity:
     y [input] -- the current solution vector
     Nnew [output] -- the size of the new mesh
     udata [input] -- the current system information
   The return for this function is a pointer to the new mesh.
*/ realtype* adapt_mesh(N_Vector y, sunindextype *Nnew, UserData udata) { sunindextype i, j; int *marks=NULL; realtype ydd, *xold=NULL, *Y=NULL, *xnew=NULL; sunindextype num_refine, N_new; /* Access current solution and mesh arrays */ xold = udata->x_host; Y = N_VGetHostArrayPointer_OpenMPDEV(y); /* assumes copy to host already done */ if (check_flag((void *) Y, "N_VGetHostArrayPointer_OpenMPDEV", 0)) return NULL; /* create marking array */ marks = calloc(udata->N-1, sizeof(int)); /* perform marking: 0 -> leave alone 1 -> refine */ for (i=1; i<udata->N-1; i++) { /* approximate scaled second-derivative */ ydd = Y[i-1] - TWO*Y[i] + Y[i+1]; /* check for refinement */ if (fabs(ydd) > udata->refine_tol) { marks[i-1] = 1; marks[i] = 1; } } /* allocate new mesh */ num_refine = 0; for (i=0; i<udata->N-1; i++) if (marks[i] == 1) num_refine++; N_new = udata->N + num_refine; *Nnew = N_new; /* Store new array length */ xnew = malloc((N_new) * sizeof(realtype)); /* fill new mesh */ xnew[0] = xold[0]; /* store endpoints */ xnew[N_new-1] = xold[udata->N-1]; j=1; /* iterate over old intervals */ for (i=0; i<udata->N-1; i++) { /* if mark is 0, reuse old interval */ if (marks[i] == 0) { xnew[j++] = xold[i+1]; continue; } /* if mark is 1, refine old interval */ if (marks[i] == 1) { xnew[j++] = PT5*(xold[i]+xold[i+1]); xnew[j++] = xold[i+1]; continue; } } /* verify that new mesh is legal */ for (i=0; i<N_new-1; i++) { if (xnew[i+1] <= xnew[i]) { fprintf(stderr,"adapt_mesh error: illegal mesh created\n"); free(xnew); return NULL; } } free(marks); /* Delete marking array */ return xnew; /* Return with success */ } /* Projects one vector onto another: Nold [input] -- the size of the old mesh xold [input] -- the old mesh yold [input] -- the vector defined over the old mesh Nnew [input] -- the size of the new mesh xnew [input] -- the new mesh ynew [output] -- the vector defined over the new mesh (allocated prior to calling project) */ static int project(sunindextype Nold, realtype *xold, 
N_Vector yold, sunindextype Nnew, realtype *xnew, N_Vector ynew) { sunindextype iv, i, j; realtype *Yold=NULL, *Ynew=NULL; int dev = omp_get_default_device(); /* Access data arrays */ Yold = N_VGetDeviceArrayPointer_OpenMPDEV(yold); /* access data arrays */ if (check_flag((void *) Yold, "N_VGetDeviceArrayPointer_OpenMPDEV", 0)) return 1; Ynew = N_VGetDeviceArrayPointer_OpenMPDEV(ynew); if (check_flag((void *) Ynew, "N_VGetDeviceArrayPointer_OpenMPDEV", 0)) return 1; /* loop over new mesh, finding corresponding interval within old mesh, and perform piecewise linear interpolation from yold to ynew */ iv=0; #pragma omp target map(to:iv) is_device_ptr(Yold,Ynew,xnew,xold) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) { for (i=0; i<Nnew; i++) { /* find old interval, start with previous value since sorted */ for (j=iv; j<Nold-1; j++) { if (xnew[i] >= xold[j] && xnew[i] <= xold[j+1]) { iv = j; break; } iv = Nold-1; /* just in case it wasn't found above */ } /* perform interpolation */ Ynew[i] = Yold[iv]*(xnew[i]-xold[iv+1])/(xold[iv]-xold[iv+1]) + Yold[iv+1]*(xnew[i]-xold[iv])/(xold[iv+1]-xold[iv]); } } return 0; /* Return with success */ } /* Check function return value... 
/*
    opt == 0 means SUNDIALS function allocates memory so check if
             returned NULL pointer
    opt == 1 means SUNDIALS function returns a flag so check if
             flag >= 0
    opt == 2 means function allocates memory so check if returned
             NULL pointer
*/
static int check_flag(void *flagvalue, const char *funcname, int opt)
{
  switch (opt) {

  case 0:   /* SUNDIALS constructor: NULL result means allocation failed */
    if (flagvalue == NULL) {
      fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return 1;
    }
    return 0;

  case 1: { /* integer status flag: any negative value is an error */
    const int *status = (const int *) flagvalue;
    if (*status < 0) {
      fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with flag = %d\n\n",
              funcname, *status);
      return 1;
    }
    return 0;
  }

  case 2:   /* plain memory allocation: NULL result means failure */
    if (flagvalue == NULL) {
      fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return 1;
    }
    return 0;

  default:  /* unrecognized opt: nothing to check */
    return 0;
  }
}

/*---- end of file ----*/
task.c
#include <stdlib.h> #include <stdio.h> #include "omp.h" #include "../utilities/check.h" #define PRINT(_args...) //#define PRINT(_args...) printf(_args) #define N 64 #define TRY_TASK 1 #define TASK_COMPUTE 1 #define OFF 1 #define T1 1 #define T1 1 #define T2 1 #define T3 1 #define T4 1 #define T5 1 #define T6 1 #define T7 1 #define T8 1 #define T9 1 #define T10 1 #define T11 1 #define T12 1 int main () { int a[N], aa[N]; int b[N], bb[N]; int c[N], cc[N]; int d[N], dd[N]; int e[N], ee[N]; int i, errors; int cond = 0; #if OFF check_offloading(); #endif // Test: task within target #if T1 // init for(i=0; i<N; i++) { a[i] = aa[i] = i+1; b[i] = bb[i] = 2*i +1; } // target starts 1 team and many threads in it #pragma omp target map(tofrom: a, b) { int id = omp_get_thread_num(); a[id]++; #if TRY_TASK #pragma omp task firstprivate(id) shared(b) default(none) { #if TASK_COMPUTE PRINT("hi alex from %d\n", id); b[id]++; #endif } #pragma omp taskwait #endif } // reproduce aa[0]++; #if TRY_TASK && TASK_COMPUTE bb[0]++; #endif // verify errors = 0; for(i=0; i<N; i++) { if (a[i] != aa[i]) printf("%4i: got a %d, expected %d, error %d\n", i, a[i], aa[i], ++errors); if (b[i] != bb[i]) printf("%4i: got b %d, expected %d, error %d\n", i, b[i], bb[i], ++errors); } printf("#1 got %d errors\n", errors); #endif // Test: task within parallel #if T2 // init for(i=0; i<N; i++) { a[i] = aa[i] = i+1; b[i] = bb[i] = 2*i +1; } // target starts 1 team and many threads in it #pragma omp target map(tofrom: a, b) { #pragma omp parallel num_threads(64) { int id = omp_get_thread_num(); a[id]++; #if TRY_TASK #pragma omp task firstprivate(id) shared(b) { #if TASK_COMPUTE PRINT("hi alex from %d\n", id); b[id]++; #endif } #endif } } // reproduce for(i=0; i<N; i++) { aa[i]++; #if TRY_TASK && TASK_COMPUTE bb[i]++; #endif } // verify errors = 0; for(i=0; i<N; i++) { if (a[i] != aa[i]) printf("%4i: got a %d, expected %d, error %d\n", i, a[i], aa[i], ++errors); if (b[i] != bb[i]) printf("%4i: got b %d, expected 
%d, error %d\n", i, b[i], bb[i], ++errors); } printf("#2 got %d errors\n", errors); #endif // Test: multiple nested tasks in parallel region #if T3 // init for(i=0; i<N; i++) { a[i] = aa[i] = i+1; b[i] = bb[i] = 2*i +1; } // target starts 1 team and many threads in it #pragma omp target map(tofrom: a, b) { #pragma omp parallel num_threads(64) { int id = omp_get_thread_num(); a[id]++; #pragma omp task firstprivate(id) shared(b) { PRINT("hi alex from %d\n", id); #pragma omp atomic b[id]++; #pragma omp task firstprivate(id) shared(b) { PRINT("hi alex from %d\n", id); #pragma omp atomic b[id]++; #pragma omp task firstprivate(id) shared(b) { PRINT("hi alex from %d\n", id); #pragma omp atomic b[id]++; } } } } } // reproduce for(i=0; i<N; i++) { aa[i]++; bb[i]+=3; } // verify errors = 0; for(i=0; i<N; i++) { if (a[i] != aa[i]) printf("%4i: got a %d, expected %d, error %d\n", i, a[i], aa[i], ++errors); if (b[i] != bb[i]) printf("%4i: got b %d, expected %d, error %d\n", i, b[i], bb[i], ++errors); } printf("#3 got %d errors\n", errors); #endif // Test: three successive tasks in a parallel region #if T4 // init for(i=0; i<N; i++) { a[i] = aa[i] = i+1; b[i] = bb[i] = 2*i +1; } // target starts 1 team and many threads in it #pragma omp target map(tofrom: a, b) { #pragma omp parallel num_threads(64) { int id = omp_get_thread_num(); a[id]++; #pragma omp task firstprivate(id) shared(b) { PRINT("hi alex from %d\n", id); #pragma omp atomic b[id]++; } #pragma omp task firstprivate(id) shared(b) { PRINT("hi alex from %d\n", id); #pragma omp atomic b[id]++; } #pragma omp task firstprivate(id) shared(b) { PRINT("hi alex from %d\n", id); #pragma omp atomic b[id]++; } } } // reproduce for(i=0; i<N; i++) { aa[i]++; bb[i]+=3; } // verify errors = 0; for(i=0; i<N; i++) { if (a[i] != aa[i]) printf("%4i: got a %d, expected %d, error %d\n", i, a[i], aa[i], ++errors); if (b[i] != bb[i]) printf("%4i: got b %d, expected %d, error %d\n", i, b[i], bb[i], ++errors); } printf("#4 got %d errors\n", 
errors); #endif // Test: change of context when entering/exiting tasks #if T5 // init for(i=0; i<N; i++) { a[i] = aa[i] = i+1; b[i] = bb[i] = 2*i+1; c[i] = cc[i] = 3*i+1; d[i] = dd[i] = 4*i+1; e[i] = ee[i] = 5*i+1; } // target starts 1 team and many threads in it #pragma omp target map(tofrom: a, b, c, d, e) { omp_set_schedule(omp_sched_static, 1); #pragma omp parallel num_threads(64) { omp_set_schedule(omp_sched_static, 2); int id = omp_get_thread_num(); // task 1 #pragma omp task firstprivate(id) shared(b, c, d, e) { omp_set_schedule(omp_sched_static, 3); PRINT("hi alex from %d\n", id); // task 2 #pragma omp task firstprivate(id) shared(b, c, d, e) { omp_set_schedule(omp_sched_static, 4); PRINT("hi alex from %d\n", id); // task 3 #pragma omp task firstprivate(id) shared(b, c, d, e) { omp_set_schedule(omp_sched_static, 5); PRINT("hi alex from %d\n", id); // task 3 omp_sched_t s; int chunk; omp_get_schedule(&s, &chunk); if (s == omp_sched_static && chunk == 5) e[id]++; } // task 2 omp_sched_t s; int chunk; omp_get_schedule(&s, &chunk); if (s == omp_sched_static && chunk == 4) d[id]++; } // task 1 omp_sched_t s; int chunk; omp_get_schedule(&s, &chunk); if (s == omp_sched_static && chunk == 3) c[id]++; } // par omp_sched_t s; int chunk; omp_get_schedule(&s, &chunk); if (s == omp_sched_static && chunk == 2) b[id]++; } // team omp_sched_t s; int chunk; omp_get_schedule(&s, &chunk); if (s == omp_sched_static && chunk == 1) a[0]++; } // reproduce aa[0]++; for(i=0; i<N; i++) { bb[i]++; cc[i]++; dd[i]++; ee[i]++; } // verify errors = 0; for(i=0; i<N; i++) { if (a[i] != aa[i]) printf("%4i: got a %d, expected %d, error %d\n", i, a[i], aa[i], ++errors); if (b[i] != bb[i]) printf("%4i: got b %d, expected %d, error %d\n", i, b[i], bb[i], ++errors); if (c[i] != cc[i]) printf("%4i: got c %d, expected %d, error %d\n", i, c[i], cc[i], ++errors); if (d[i] != dd[i]) printf("%4i: got d %d, expected %d, error %d\n", i, d[i], dd[i], ++errors); if (e[i] != ee[i]) printf("%4i: got e %d, 
expected %d, error %d\n", i, e[i], ee[i], ++errors); } printf("#5 got %d errors\n", errors); #endif // Test: change of context when using if clause #if T6 // init for(i=0; i<N; i++) { a[i] = aa[i] = i+1; b[i] = bb[i] = 2*i+1; c[i] = cc[i] = 3*i+1; d[i] = dd[i] = 4*i+1; e[i] = ee[i] = 5*i+1; } // target starts 1 team and many threads in it #pragma omp target map(tofrom: a, b, c, d, e, cond) { omp_set_schedule(omp_sched_static, 1); #pragma omp parallel num_threads(64) { omp_set_schedule(omp_sched_static, 2); int id = omp_get_thread_num(); // task 1 #pragma omp task firstprivate(id) shared(b, c, d, e) if(cond) { omp_set_schedule(omp_sched_static, 3); PRINT("hi alex from %d\n", id); // task 2 #pragma omp task firstprivate(id) shared(b, c, d, e) if(cond) { omp_set_schedule(omp_sched_static, 4); PRINT("hi alex from %d\n", id); // task 3 #pragma omp task firstprivate(id) shared(b, c, d, e) if(cond) { omp_set_schedule(omp_sched_static, 5); PRINT("hi alex from %d\n", id); // task 3 omp_sched_t s; int chunk; omp_get_schedule(&s, &chunk); if (s == omp_sched_static && chunk == 5) e[id]++; } // task 2 omp_sched_t s; int chunk; omp_get_schedule(&s, &chunk); if (s == omp_sched_static && chunk == 4) d[id]++; } // task 1 omp_sched_t s; int chunk; omp_get_schedule(&s, &chunk); if (s == omp_sched_static && chunk == 3) c[id]++; } // par omp_sched_t s; int chunk; omp_get_schedule(&s, &chunk); if (s == omp_sched_static && chunk == 2) b[id]++; } // team omp_sched_t s; int chunk; omp_get_schedule(&s, &chunk); if (s == omp_sched_static && chunk == 1) a[0]++; } // reproduce aa[0]++; for(i=0; i<N; i++) { bb[i]++; cc[i]++; dd[i]++; ee[i]++; } // verify errors = 0; for(i=0; i<N; i++) { if (a[i] != aa[i]) printf("%4i: got a %d, expected %d, error %d\n", i, a[i], aa[i], ++errors); if (b[i] != bb[i]) printf("%4i: got b %d, expected %d, error %d\n", i, b[i], bb[i], ++errors); if (c[i] != cc[i]) printf("%4i: got c %d, expected %d, error %d\n", i, c[i], cc[i], ++errors); if (d[i] != dd[i]) 
printf("%4i: got d %d, expected %d, error %d\n", i, d[i], dd[i], ++errors); if (e[i] != ee[i]) printf("%4i: got e %d, expected %d, error %d\n", i, e[i], ee[i], ++errors); } printf("#6 got %d errors\n", errors); #endif // Test: final #if T7 // init for(i=0; i<N; i++) { a[i] = aa[i] = i+1; b[i] = bb[i] = 2*i +1; } // target starts 1 team and many threads in it #pragma omp target map(tofrom: a, b) { #pragma omp parallel num_threads(64) { int id = omp_get_thread_num(); a[id]++; #pragma omp task firstprivate(id) shared(b) final(1) { PRINT("hi alex from %d\n", id); #pragma omp atomic b[id]++; #pragma omp task firstprivate(id) shared(b) final(1) { PRINT("hi alex from %d\n", id); #pragma omp atomic b[id]++; #pragma omp task firstprivate(id) shared(b) final(1) { PRINT("hi alex from %d\n", id); #pragma omp atomic b[id]++; } } } } } // reproduce for(i=0; i<N; i++) { aa[i]++; bb[i]+=3; } // verify errors = 0; for(i=0; i<N; i++) { if (a[i] != aa[i]) printf("%4i: got a %d, expected %d, error %d\n", i, a[i], aa[i], ++errors); if (b[i] != bb[i]) printf("%4i: got b %d, expected %d, error %d\n", i, b[i], bb[i], ++errors); } printf("#7 got %d errors\n", errors); #endif // Test: untied #if T8 && 0 // init for(i=0; i<N; i++) { a[i] = aa[i] = i+1; b[i] = bb[i] = 2*i +1; } // target starts 1 team and many threads in it #pragma omp target map(tofrom: a, b) { #pragma omp parallel num_threads(64) { int id = omp_get_thread_num(); a[id]++; #pragma omp task firstprivate(id) shared(b) untied { PRINT("hi alex from %d\n", id); #pragma omp atomic b[id]++; #pragma omp task firstprivate(id) shared(b) untied { PRINT("hi alex from %d\n", id); #pragma omp atomic b[id]++; #pragma omp task firstprivate(id) shared(b) untied { PRINT("hi alex from %d\n", id); #pragma omp atomic b[id]++; } } } } } // reproduce for(i=0; i<N; i++) { aa[i]++; bb[i]+=3; } // verify errors = 0; for(i=0; i<N; i++) { if (a[i] != aa[i]) printf("%4i: got a %d, expected %d, error %d\n", i, a[i], aa[i], ++errors); if (b[i] != bb[i]) 
printf("%4i: got b %d, expected %d, error %d\n", i, b[i], bb[i], ++errors); } printf("#8 got %d errors\n", errors); #endif // Test: mergeaeble #if T9 // init for(i=0; i<N; i++) { a[i] = aa[i] = i+1; b[i] = bb[i] = 2*i +1; } // target starts 1 team and many threads in it #pragma omp target map(tofrom: a, b) { #pragma omp parallel num_threads(64) { int id = omp_get_thread_num(); a[id]++; #pragma omp task firstprivate(id) shared(b) { PRINT("hi alex from %d\n", id); //#pragma omp atomic b[id]++; #pragma omp task firstprivate(id) shared(b) mergeable { PRINT("hi alex from %d\n", id); //#pragma omp atomic b[id]++; #pragma omp task firstprivate(id) shared(b) mergeable { PRINT("hi alex from %d\n", id); //#pragma omp atomic b[id]++; } } } } } // reproduce for(i=0; i<N; i++) { aa[i]++; bb[i]+=3; } // verify errors = 0; for(i=0; i<N; i++) { if (a[i] != aa[i]) printf("%4i: got a %d, expected %d, error %d\n", i, a[i], aa[i], ++errors); if (b[i] != bb[i]) printf("%4i: got b %d, expected %d, error %d\n", i, b[i], bb[i], ++errors); } printf("#9 got %d errors\n", errors); #endif // Test: private #if T10 && 0 /* Test disabled because this test only works on the GPU, where a task is guaranteed to work on the same thread as it is created. This is not true in general on the host. So we cannot use this test generally. 
Thus I am disabling it here */ // init for(i=0; i<N; i++) { a[i] = aa[i] = i+1; b[i] = bb[i] = 2*i +1; } // target starts 1 team and many threads in it #pragma omp target map(tofrom: a, b) { #pragma omp parallel num_threads(64) { int id = omp_get_thread_num(); a[id]++; #if TRY_TASK #pragma omp task private(id) shared(b) { int id = omp_get_thread_num(); #if TASK_COMPUTE PRINT("hi alex from %d\n", id); b[id]++; #endif } #endif } } // reproduce for(i=0; i<N; i++) { aa[i]++; #if TRY_TASK && TASK_COMPUTE bb[i]++; #endif } // verify errors = 0; for(i=0; i<N; i++) { if (a[i] != aa[i]) printf("%4i: got a %d, expected %d, error %d\n", i, a[i], aa[i], ++errors); if (b[i] != bb[i]) printf("%4i: got b %d, expected %d, error %d\n", i, b[i], bb[i], ++errors); } printf("#10 got %d errors\n", errors); #endif // Test: depend #if T11 // init int x; for(i=0; i<N; i++) { a[i] = aa[i] = i+1; b[i] = bb[i] = 2*i +1; } // target starts 1 team and many threads in it #pragma omp target map(tofrom: a, b) { #pragma omp parallel num_threads(64) { int id = omp_get_thread_num(); a[id]++; #pragma omp task firstprivate(id) shared(b) depend(out:a[id]) { PRINT("hi alex from %d\n", id); b[id]++; } #pragma omp task firstprivate(id) shared(b) depend(inout:a[id]) { PRINT("hi alex from %d\n", id); b[id]++; } #pragma omp task firstprivate(id) shared(b) depend(in:a[id]) { PRINT("hi alex from %d\n", id); b[id]++; } } } // reproduce for(i=0; i<N; i++) { aa[i]++; bb[i]+=3; } // verify errors = 0; for(i=0; i<N; i++) { if (a[i] != aa[i]) printf("%4i: got a %d, expected %d, error %d\n", i, a[i], aa[i], ++errors); if (b[i] != bb[i]) printf("%4i: got b %d, expected %d, error %d\n", i, b[i], bb[i], ++errors); } printf("#11 got %d errors\n", errors); #endif // Test: inverted priority #if T12 // init for(i=0; i<N; i++) { a[i] = aa[i] = i+1; b[i] = bb[i] = 2*i +1; } // target starts 1 team and many threads in it #pragma omp target map(tofrom: a, b) { #pragma omp parallel num_threads(64) { int id = 
omp_get_thread_num(); a[id]++; #pragma omp task firstprivate(id) shared(b) priority(0) { PRINT("hi alex from %d\n", id); #pragma omp atomic b[id]++; } #pragma omp task firstprivate(id) shared(b) priority(10) { PRINT("hi alex from %d\n", id); #pragma omp atomic b[id]++; } #pragma omp task firstprivate(id) shared(b) priority(20) { PRINT("hi alex from %d\n", id); #pragma omp atomic b[id]++; } } } // reproduce for(i=0; i<N; i++) { aa[i]++; bb[i]+=3; } // verify errors = 0; for(i=0; i<N; i++) { if (a[i] != aa[i]) printf("%4i: got a %d, expected %d, error %d\n", i, a[i], aa[i], ++errors); if (b[i] != bb[i]) printf("%4i: got b %d, expected %d, error %d\n", i, b[i], bb[i], ++errors); } printf("#12 got %d errors\n", errors); #endif return 0; }
SpatialConvolutionMap.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialConvolutionMap.c"
#else

/* Forward pass of nn.SpatialConvolutionMap (Torch7 Lua binding).
 * Stack: arg 1 = module table (self), arg 2 = 3D input tensor (planes x h x w).
 * connTable is an nWeight x 2 table of 1-based (inputPlane, outputPlane) pairs;
 * each row k selects weight kernel k connecting input plane i to output plane o.
 * Writes self.output and returns 1 (the output tensor is left on self). */
static int nn_(SpatialConvolutionMap_updateOutput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  int kW = luaT_getfieldcheckint(L, 1, "kW");
  int kH = luaT_getfieldcheckint(L, 1, "kH");
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
  int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
  THTensor *connTable = luaT_getfieldcheckudata(L, 1, "connTable", torch_Tensor);
  THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor);
  THTensor *bias = luaT_getfieldcheckudata(L, 1, "bias", torch_Tensor);
  THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);

  luaL_argcheck(L, input->nDimension == 3, 2, "3D tensor expected");
  luaL_argcheck(L, input->size[0] >= nInputPlane, 2, "invalid number of input planes");
  luaL_argcheck(L, input->size[2] >= kW && input->size[1] >= kH, 2, "input image smaller than kernel size");

  /* Valid convolution output size: (in - k) / stride + 1 per spatial dim. */
  THTensor_(resize3d)(output, nOutputPlane,
                      (input->size[1] - kH) / dH + 1,
                      (input->size[2] - kW) / dW + 1);

  /* contiguous
   * NOTE(review): newContiguous may return a *copy*; results are written into
   * that copy and then freed below, so a non-contiguous self.output would not
   * receive the results — confirm output is always contiguous here. */
  input = THTensor_(newContiguous)(input);
  output = THTensor_(newContiguous)(output);

  /* get raw pointers */
  real *input_data = THTensor_(data)(input);
  real *output_data = THTensor_(data)(output);
  real *weight_data = THTensor_(data)(weight);
  real *bias_data = THTensor_(data)(bias);
  real *connTable_data = THTensor_(data)(connTable);

  /* and dims */
  long input_h = input->size[1];
  long input_w = input->size[2];
  long output_h = output->size[1];
  long output_w = output->size[2];
  long weight_h = weight->size[1];
  long weight_w = weight->size[2];

  long p;
  /* Parallel over output planes: each p touches only its own output slab,
   * so no synchronization is needed. */
#pragma omp parallel for private(p)
  for (p = 0; p < nOutputPlane; p++) {
    /* add bias */
    real *ptr_output = output_data + p*output_w*output_h;
    long j;
    for(j = 0; j < output_h*output_w; j++)
      ptr_output[j] = bias_data[p];

    /* convolve all maps: accumulate every connection k whose output plane is p */
    int nweight = connTable->size[0];
    long k;
    for (k = 0; k < nweight; k++) {
      /* get offsets for input/output (connTable entries are 1-based) */
      int o = (int)connTable_data[k*2+1]-1;
      int i = (int)connTable_data[k*2+0]-1;
      if (o == p) {
        THTensor_(validXCorr2Dptr)(output_data + o*output_w*output_h,
                                   1.0,
                                   input_data + i*input_w*input_h, input_h, input_w,
                                   weight_data + k*weight_w*weight_h, weight_h, weight_w,
                                   dH, dW);
      }
    }
  }

  /* clean up (drops the newContiguous references) */
  THTensor_(free)(input);
  THTensor_(free)(output);
  return 1;
}

/* Backward pass w.r.t. the input.
 * Stack: arg 2 = input (for sizing only), arg 3 = gradOutput.
 * Accumulates full 2D convolutions of gradOutput with each kernel into
 * self.gradInput. Returns 1. */
static int nn_(SpatialConvolutionMap_updateGradInput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
  THTensor *connTable = luaT_getfieldcheckudata(L, 1, "connTable", torch_Tensor);
  THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor);
  THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_Tensor);

  /* contiguous
   * NOTE(review): as in updateOutput, a non-contiguous gradInput would be
   * copied here and the results lost on free — confirm this is always a
   * reference, not a copy. */
  gradInput = THTensor_(newContiguous)(gradInput);
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* Resize/Zero */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  /* get raw pointers */
  real *gradInput_data = THTensor_(data)(gradInput);
  real *gradOutput_data = THTensor_(data)(gradOutput);
  real *weight_data = THTensor_(data)(weight);
  real *connTable_data = THTensor_(data)(connTable);

  /* and dims */
  long input_h = input->size[1];
  long input_w = input->size[2];
  long output_h = gradOutput->size[1];
  long output_w = gradOutput->size[2];
  long weight_h = weight->size[1];
  long weight_w = weight->size[2];

  long p;
  /* Parallel over *input* planes: each p writes only its own gradInput slab. */
#pragma omp parallel for private(p)
  for(p = 0; p < nInputPlane; p++) {
    long k;
    /* backward all connections whose input plane is p */
    int nkernel = connTable->size[0];
    for(k = 0; k < nkernel; k++) {
      int o = (int)connTable_data[k*2+1]-1;
      int i = (int)connTable_data[k*2+0]-1;
      if (i == p) {
        /* gradient to input: full convolution spreads gradOutput back */
        THTensor_(fullConv2Dptr)(gradInput_data + i*input_w*input_h,
                                 1.0,
                                 gradOutput_data + o*output_w*output_h, output_h, output_w,
                                 weight_data + k*weight_w*weight_h, weight_h, weight_w,
                                 dH, dW);
      }
    }
  }

  /* clean up */
  THTensor_(free)(gradInput);
  THTensor_(free)(gradOutput);
  return 1;
}

/* Accumulate parameter gradients (gradWeight, gradBias), scaled by optional
 * arg 4 `scale` (default 1). Returns 0. */
static int nn_(SpatialConvolutionMap_accGradParameters)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
  real scale = luaL_optnumber(L, 4, 1);
  THTensor *connTable = luaT_getfieldcheckudata(L, 1, "connTable", torch_Tensor);
  THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor);
  THTensor *gradWeight = luaT_getfieldcheckudata(L, 1, "gradWeight", torch_Tensor);
  THTensor *gradBias = luaT_getfieldcheckudata(L, 1, "gradBias", torch_Tensor);

  /* contiguous */
  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* get raw pointers */
  real *input_data = THTensor_(data)(input);
  real *gradOutput_data = THTensor_(data)(gradOutput);
  real *gradWeight_data = THTensor_(data)(gradWeight);
  real *gradBias_data = THTensor_(data)(gradBias);

  /* and dims */
  long input_h = input->size[1];
  long input_w = input->size[2];
  long output_h = gradOutput->size[1];
  long output_w = gradOutput->size[2];
  long weight_h = weight->size[1];
  long weight_w = weight->size[2];

  /* gradients wrt bias: one accumulator per output plane, race-free */
  long k;
#pragma omp parallel for private(k)
  for(k = 0; k < nOutputPlane; k++) {
    real *ptr_gradOutput = gradOutput_data + k*output_w*output_h;
    long l;
    for(l = 0; l < output_h*output_w; l++)
      gradBias_data[k] += scale*ptr_gradOutput[l];
  }

  /* gradients wrt weight: each connection k owns gradWeight slab k, race-free */
  int nkernel = connTable->size[0];
#pragma omp parallel for private(k)
  for(k = 0; k < nkernel; k++) {
    int o = (int)THTensor_(get2d)(connTable,k,1)-1;
    int i = (int)THTensor_(get2d)(connTable,k,0)-1;
    /* gradient to kernel: cross-correlation of input plane i with
     * gradOutput plane o */
    THTensor_(validXCorr2DRevptr)(gradWeight_data + k*weight_w*weight_h,
                                  scale,
                                  input_data + i*input_w*input_h, input_h, input_w,
                                  gradOutput_data + o*output_w*output_h, output_h, output_w,
                                  dH, dW);
  }

  /* clean up */
  THTensor_(free)(input);
  THTensor_(free)(gradOutput);
  return 0;
}

/* Lua method table for this module. */
static const struct luaL_Reg nn_(SpatialConvolutionMap__) [] = {
  {"SpatialConvolutionMap_updateOutput", nn_(SpatialConvolutionMap_updateOutput)},
  {"SpatialConvolutionMap_updateGradInput", nn_(SpatialConvolutionMap_updateGradInput)},
  {"SpatialConvolutionMap_accGradParameters", nn_(SpatialConvolutionMap_accGradParameters)},
  {NULL, NULL}
};

/* Register the methods on the tensor metatable under the "nn" field. */
static void nn_(SpatialConvolutionMap_init)(lua_State *L)
{
  luaT_pushmetatable(L, torch_Tensor);
  luaT_registeratname(L, nn_(SpatialConvolutionMap__), "nn");
  lua_pop(L,1);
}

#endif
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 8; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,8);t1++) { lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16)); ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(16*t2-Nz-4,8)),t1);t3<=min(min(min(floord(Nt+Ny-4,8),floord(8*t1+Ny+13,8)),floord(16*t2+Ny+12,8)),floord(16*t1-16*t2+Nz+Ny+11,8));t3++) { for (t4=max(max(max(0,ceild(t1-7,8)),ceild(16*t2-Nz-60,64)),ceild(8*t3-Ny-60,64));t4<=min(min(min(min(floord(Nt+Nx-4,64),floord(8*t1+Nx+13,64)),floord(16*t2+Nx+12,64)),floord(8*t3+Nx+4,64)),floord(16*t1-16*t2+Nz+Nx+11,64));t4++) { for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),8*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),8*t3+6),64*t4+62),16*t1-16*t2+Nz+13);t5++) { for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) { lbv=max(64*t4,t5+1); ubv=min(64*t4+63,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp 
parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
core_zgemm.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> c d s
 *
 **/

#include "core_blas.h"
#include "plasma_types.h"
#include "core_lapack.h"

/***************************************************************************//**
 *
 * @ingroup core_gemm
 *
 * Performs one of the matrix-matrix operations
 *
 *    \f[ C = \alpha [op( A )\times op( B )] + \beta C, \f]
 *
 * where op( X ) is one of:
 *    \f[ op( X ) = X,   \f]
 *    \f[ op( X ) = X^T, \f]
 *    \f[ op( X ) = X^H, \f]
 *
 * alpha and beta are scalars, and A, B and C are matrices, with op( A )
 * an m-by-k matrix, op( B ) a k-by-n matrix and C an m-by-n matrix.
 *
 *******************************************************************************
 *
 * @param[in] transa
 *          - PlasmaNoTrans:   A is not transposed,
 *          - PlasmaTrans:     A is transposed,
 *          - PlasmaConjTrans: A is conjugate transposed.
 *
 * @param[in] transb
 *          - PlasmaNoTrans:   B is not transposed,
 *          - PlasmaTrans:     B is transposed,
 *          - PlasmaConjTrans: B is conjugate transposed.
 *
 * @param[in] m
 *          The number of rows of the matrix op( A ) and of the matrix C.
 *          m >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrix op( B ) and of the matrix C.
 *          n >= 0.
 *
 * @param[in] k
 *          The number of columns of the matrix op( A ) and the number of rows
 *          of the matrix op( B ). k >= 0.
 *
 * @param[in] alpha
 *          The scalar alpha.
 *
 * @param[in] A
 *          An lda-by-ka matrix, where ka is k when transa = PlasmaNoTrans,
 *          and is m otherwise.
 *
 * @param[in] lda
 *          The leading dimension of the array A.
 *          When transa = PlasmaNoTrans, lda >= max(1,m),
 *          otherwise, lda >= max(1,k).
 *
 * @param[in] B
 *          An ldb-by-kb matrix, where kb is n when transb = PlasmaNoTrans,
 *          and is k otherwise.
 *
 * @param[in] ldb
 *          The leading dimension of the array B.
 *          When transb = PlasmaNoTrans, ldb >= max(1,k),
 *          otherwise, ldb >= max(1,n).
 *
 * @param[in] beta
 *          The scalar beta.
 *
 * @param[in,out] C
 *          An ldc-by-n matrix. On exit, the array is overwritten by the m-by-n
 *          matrix ( alpha*op( A )*op( B ) + beta*C ).
 *
 * @param[in] ldc
 *          The leading dimension of the array C. ldc >= max(1,m).
 *
 ******************************************************************************/
void core_zgemm(plasma_enum_t transa, plasma_enum_t transb,
                int m, int n, int k,
                plasma_complex64_t alpha, const plasma_complex64_t *A, int lda,
                                          const plasma_complex64_t *B, int ldb,
                plasma_complex64_t beta,        plasma_complex64_t *C, int ldc)
{
    /* Thin wrapper: delegate directly to CBLAS (column-major). PLASMA's
     * trans enums are numerically compatible with CBLAS_TRANSPOSE. */
    cblas_zgemm(CblasColMajor,
                (CBLAS_TRANSPOSE)transa, (CBLAS_TRANSPOSE)transb,
                m, n, k,
                CBLAS_SADDR(alpha), A, lda,
                                    B, ldb,
                CBLAS_SADDR(beta),  C, ldc);
}

/******************************************************************************/
/* Task-based variant: wraps core_zgemm in an OpenMP task whose dependences
 * cover the accessed tiles, so the runtime serializes it correctly against
 * other tasks touching A, B, or C. The task is a no-op if the sequence has
 * already failed. */
void core_omp_zgemm(
    plasma_enum_t transa, plasma_enum_t transb,
    int m, int n, int k,
    plasma_complex64_t alpha, const plasma_complex64_t *A, int lda,
                              const plasma_complex64_t *B, int ldb,
    plasma_complex64_t beta,        plasma_complex64_t *C, int ldc,
    plasma_sequence_t *sequence, plasma_request_t *request)
{
    /* ak/bk = number of columns actually stored for A/B, which depends on
     * whether the operand is transposed; used to size the depend ranges. */
    int ak;
    if (transa == PlasmaNoTrans)
        ak = k;
    else
        ak = m;

    int bk;
    if (transb == PlasmaNoTrans)
        bk = n;
    else
        bk = k;

    #pragma omp task depend(in:A[0:lda*ak]) \
                     depend(in:B[0:ldb*bk]) \
                     depend(inout:C[0:ldc*n])
    {
        if (sequence->status == PlasmaSuccess)
            core_zgemm(transa, transb,
                       m, n, k,
                       alpha, A, lda,
                              B, ldb,
                       beta,  C, ldc);
    }
}
generator.h
// Copyright (c) 2015, The Regents of the University of California (Regents) // See LICENSE.txt for license details #ifndef GENERATOR_H_ #define GENERATOR_H_ #include <algorithm> #include <cinttypes> #include <random> #include "graph.h" #include "pvector.h" #include "util.h" /* GAP Benchmark Suite Class: Generator Author: Scott Beamer Given scale and degree, generates edgelist for synthetic graph - Intended to be called from Builder - GenerateEL(uniform) generates and returns the edgelist - Can generate uniform random (uniform=true) or R-MAT graph according to Graph500 parameters (uniform=false) - Can also randomize weights within a weighted edgelist (InsertWeights) - Blocking/reseeding is for parallelism with deterministic output edgelist */ template <typename NodeID_, typename DestID_ = NodeID_, typename WeightT_ = NodeID_, typename TimestampT_ = WeightT_> class Generator { typedef EdgePair<NodeID_, DestID_> Edge; typedef EdgePair<NodeID_, NodeWeight<NodeID_, WeightT_, TimestampT_>> WEdge; typedef pvector<Edge> EdgeList; public: Generator(int scale, int degree) { scale_ = scale; num_nodes_ = 1l << scale; num_edges_ = num_nodes_ * degree; if (num_nodes_ > std::numeric_limits<NodeID_>::max()) { std::cout << "NodeID type (max: " << std::numeric_limits<NodeID_>::max(); std::cout << ") too small to hold " << num_nodes_ << std::endl; std::cout << "Recommend changing NodeID (typedef'd in src/benchmark.h)"; std::cout << " to a wider type and recompiling" << std::endl; std::exit(-31); } } void PermuteIDs(EdgeList &el) { pvector<NodeID_> permutation(num_nodes_); std::mt19937 rng(kRandSeed); #pragma omp parallel for for (NodeID_ n=0; n < num_nodes_; n++) permutation[n] = n; shuffle(permutation.begin(), permutation.end(), rng); #pragma omp parallel for for (int64_t e=0; e < num_edges_; e++) el[e] = Edge(permutation[el[e].u], permutation[el[e].v]); } EdgeList MakeUniformEL() { EdgeList el(num_edges_); #pragma omp parallel { std::mt19937 rng; 
std::uniform_int_distribution<NodeID_> udist(0, num_nodes_-1); #pragma omp for for (int64_t block=0; block < num_edges_; block+=block_size) { rng.seed(kRandSeed + block/block_size); for (int64_t e=block; e < std::min(block+block_size, num_edges_); e++) { el[e] = Edge(udist(rng), udist(rng)); } } } return el; } EdgeList MakeRMatEL() { const float A = 0.57f, B = 0.19f, C = 0.19f; EdgeList el(num_edges_); #pragma omp parallel { std::mt19937 rng; std::uniform_real_distribution<float> udist(0, 1.0f); #pragma omp for for (int64_t block=0; block < num_edges_; block+=block_size) { rng.seed(kRandSeed + block/block_size); for (int64_t e=block; e < std::min(block+block_size, num_edges_); e++) { NodeID_ src = 0, dst = 0; for (int depth=0; depth < scale_; depth++) { float rand_point = udist(rng); src = src << 1; dst = dst << 1; if (rand_point < A+B) { if (rand_point > A) dst++; } else { src++; if (rand_point > A+B+C) dst++; } } el[e] = Edge(src, dst); } } } PermuteIDs(el); // TIME_PRINT("Shuffle", std::shuffle(el.begin(), el.end(), // std::mt19937())); return el; } EdgeList GenerateEL(bool uniform) { EdgeList el; Timer t; t.Start(); if (uniform) el = MakeUniformEL(); else el = MakeRMatEL(); t.Stop(); PrintTime("Generate Time", t.Seconds()); return el; } static void InsertWeights(pvector<EdgePair<NodeID_, NodeID_>> &el) {} // Overwrites existing weights with random from [1,255] static void InsertWeights(pvector<WEdge> &el) { #pragma omp parallel { std::mt19937 rng; std::uniform_int_distribution<int> udist(1, 255); int64_t el_size = el.size(); #pragma omp for for (int64_t block=0; block < el_size; block+=block_size) { rng.seed(kRandSeed + block/block_size); for (int64_t e=block; e < std::min(block+block_size, el_size); e++) { el[e].v.w = static_cast<WeightT_>(udist(rng)+1); // to make sure weight is not zero } } } } private: int scale_; int64_t num_nodes_; int64_t num_edges_; static const int64_t block_size = 1<<18; }; #endif // GENERATOR_H_
convolution_packnto1_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Convolution, packn-channel fp16 input -> 1-channel fp16 output, RISC-V
// Vector implementation. This "fp16s" variant loads fp16 data but widens to
// fp32 for the multiply-accumulate, trading speed for accuracy.
static void convolution_packnto1_fp16s_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    // packn = fp16 lanes per vector register (VLEN/16 bits); vl fixes the
    // active vector length for all intrinsic calls below.
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets: flattened per-tap element offsets into a bottom_blob
    // row, accounting for dilation (gap skips to the next kernel row).
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const float* bias_data_ptr = bias_data;

    // num_output: one thread per output channel
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // scalar part of the accumulator starts at the bias
                float sum = 0.f;

                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }

                vfloat32m2_t _sum = vfmv_v_f_f32m2(0.f, vl);

                const __fp16* kptr = weight_data_fp16.channel(p);

                // channels: accumulate over every input channel group and tap
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w * packn;

                    for (int k = 0; k < maxk; k++)
                    {
                        // widen fp16 loads to fp32, then fused multiply-add
                        vfloat32m2_t _val = vfwcvt_f_f_v_f32m2(vle16_v_f16m1(sptr + space_ofs[k] * packn, vl), vl);
                        vfloat32m2_t _w = vfwcvt_f_f_v_f32m2(vle16_v_f16m1(kptr, vl), vl);
                        _sum = vfmacc_vv_f32m2(_sum, _val, _w, vl);

                        kptr += packn;
                    }
                }

                // horizontal reduction of _sum, seeded with the scalar bias
                sum = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m2_f32m1(vfloat32m1_t(), _sum, vfmv_s_f_f32m1(vfloat32m1_t(), sum, vl), vl));

                sum = activation_ss(sum, activation_type, activation_params);

                outptr[j] = (__fp16)sum;
            }

            outptr += outw;
        }
    }
}

// Same convolution as above, but "fp16sa": arithmetic stays entirely in
// fp16 (no widening), which is faster but accumulates in lower precision.
static void convolution_packnto1_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data_fp16, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets (identical construction to the fp16s variant above)
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const __fp16* bias_data_ptr = bias_data_fp16;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                __fp16 sum = 0.f;

                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }

                vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);

                const __fp16* kptr = weight_data_fp16.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w * packn;

                    for (int k = 0; k < maxk; k++)
                    {
                        // fp16 load + fp16 fused multiply-add, no widening
                        vfloat16m1_t _val = vle16_v_f16m1(sptr + space_ofs[k] * packn, vl);
                        vfloat16m1_t _w = vle16_v_f16m1(kptr, vl);
                        _sum = vfmacc_vv_f16m1(_sum, _val, _w, vl);

                        kptr += packn;
                    }
                }

                // fp16 horizontal reduction seeded with the scalar bias
                sum = vfmv_f_s_f16m1_f16(vfredsum_vs_f16m1_f16m1(vfloat16m1_t(), _sum, vfmv_s_f_f16m1(vfloat16m1_t(), sum, vl), vl));

                sum = activation_ss(sum, activation_type, activation_params);

                outptr[j] = sum;
            }

            outptr += outw;
        }
    }
}
GB_unaryop__minv_int16_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_int16_fp32
// op(A') function:  GB_tran__minv_int16_fp32

// C type:   int16_t
// A type:   float
// cast:     int16_t cij ; GB_CAST_SIGNED(cij,aij,16)
// unaryop:  cij = GB_IMINV_SIGNED (aij, 16)

// The macros below parameterize the shared kernel templates for this
// specific (operator, output type, input type) instantiation.

#define GB_ATYPE \
    float

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: 16-bit signed integer multiplicative inverse
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 16) ;

// casting: float -> int16_t with saturation/typecast rules
#define GB_CASTING(z, aij) \
    int16_t z ; GB_CAST_SIGNED(z,aij,16) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;  \
    GB_OP (GB_CX (pC), z) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT16 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise over anz entries; Cx and Ax may alias because each iteration
// reads and writes only position p.
GrB_Info GB_unop__minv_int16_fp32
(
    int16_t *Cx,            // Cx and Ax may be aliased
    float *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop body lives in the shared template GB_unaryop_transpose.c,
// specialized by the macros above (phase 2 of the two-phase transpose).
GrB_Info GB_tran__minv_int16_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
findSubGraphs.c
#include "defs.h"

/* For each (startVertex, endVertex) pair in maxIntWtList, runs a parallel
 * breadth-first search limited to SubGraphPathLength hops and reports the
 * number of vertices reached. Returns the elapsed wall-clock time.
 *
 * The mcsim_* calls are simulator instrumentation (skip/log/fence/clwb
 * markers) and the UNDOLOG/REDOLOG/BASELINE/CLWB blocks model persistent-
 * memory logging schemes; neither affects the BFS result itself.
 *
 * Shared state (allocated by thread 0):
 *   S       - global frontier array, phases laid out consecutively
 *   start   - start[ph] = index in S where phase ph begins
 *   visited - per-vertex mark bytes
 *   pSCount - per-thread frontier sizes, prefix-summed into S offsets
 *   vLock   - per-vertex locks guarding the visited marks (OpenMP build) */
double findSubGraphs(graph* G, edge* maxIntWtList, int maxIntWtListSize)
{
    mcsim_skip_instrs_begin();
    VERT_T* S;
    LONG_T *start;
    char* visited;
    LONG_T *pSCount;
#ifdef _OPENMP
    omp_lock_t* vLock;
#endif
    LONG_T phase_num, numPhases;
    LONG_T count;
    double elapsed_time = get_seconds();
    numPhases = SubGraphPathLength + 1;

#ifdef _OPENMP
    omp_set_num_threads(NUM_THREADS);
#pragma omp parallel
    {
#endif
        /* Per-thread locals; pS is this thread's private chunk of the next
         * frontier, merged into S after each phase. */
        VERT_T *pS, *pSt;
        LONG_T pCount, pS_size;
        LONG_T v, w, search_num;
        int tid, nthreads;
        LONG_T j, k, vert, n;
#ifdef _OPENMP
        LONG_T i;
        tid = omp_get_thread_num();
        nthreads = omp_get_num_threads();
#else
        tid = 0;
        nthreads = 1;
#endif

        n = G->n;
        pS_size = n/nthreads + 1;
        pS = (VERT_T *) malloc(pS_size*sizeof(VERT_T));
        assert(pS != NULL);

        /* Thread 0 allocates the shared structures; the barrier below
         * publishes them before any other thread touches vLock. */
        if (tid == 0) {
            S = (VERT_T *) malloc(n*sizeof(VERT_T));
            visited = (char *) calloc(n, sizeof(char));
            start = (LONG_T *) calloc((numPhases+2), sizeof(LONG_T));
            pSCount = (LONG_T *) malloc((nthreads+1)*sizeof(LONG_T));
#ifdef _OPENMP
            vLock = (omp_lock_t *) malloc(n*sizeof(omp_lock_t));
#endif
        }

#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
        for (i=0; i<n; i++) {
            omp_init_lock(&vLock[i]);
        }
#endif
        mcsim_skip_instrs_end();

        for (search_num=0; search_num<maxIntWtListSize; search_num++) {

            mcsim_skip_instrs_begin();
#ifdef _OPENMP
#pragma omp barrier
#endif
            /* Run path-limited BFS in parallel: seed the frontier with both
             * endpoints of this edge and reset the visited marks. */
            if (tid == 0) {
                free(visited);
                visited = (char *) calloc(n, sizeof(char));
                S[0] = maxIntWtList[search_num].startVertex;
                S[1] = maxIntWtList[search_num].endVertex;
                visited[S[0]] = (char) 1;
                visited[S[1]] = (char) 1;
                count = 2;
                phase_num = 1;
                start[0] = 0;
                start[1] = 1;
                start[2] = 2;
            }
            mcsim_skip_instrs_end();

#ifdef _OPENMP
#pragma omp barrier
#endif

            while (phase_num <= SubGraphPathLength) {

                pCount = 0;

                /* Expand the current frontier S[start[ph] .. start[ph+1]) */
#ifdef _OPENMP
#pragma omp for
#endif
                for (vert=start[phase_num]; vert<start[phase_num+1]; vert++) {

                    v = S[vert];

                    mcsim_skip_instrs_begin();
#ifdef UNDOLOG
                    VERT_T *undolog_pS;
                    undolog_pS = (VERT_T *) malloc(pS_size*sizeof(VERT_T));
#endif // UNDOLOG
#ifdef REDOLOG
                    VERT_T *redolog_pS;
                    redolog_pS = (VERT_T *) malloc(pS_size*sizeof(VERT_T));
#endif // REDOLOG
                    mcsim_skip_instrs_end();

                    /* Visit each neighbor w of v exactly once; the per-vertex
                     * lock makes the check-and-mark of visited[w] atomic. */
                    for (j=G->numEdges[v]; j<G->numEdges[v+1]; j++) {
                        w = G->endV[j];
                        if (v == w)
                            continue;
#ifdef _OPENMP
                        int myLock = omp_test_lock(&vLock[w]);
                        if (myLock) {
#endif
                            if (visited[w] != (char) 1) {
                                visited[w] = (char) 1;
                                if (pCount == pS_size) {
                                    /* Resize pS */
                                    pSt = (VERT_T *)malloc(2*pS_size*sizeof(VERT_T));
                                    memcpy(pSt, pS, pS_size*sizeof(VERT_T));
                                    free(pS);
                                    pS = pSt;
                                    pS_size = 2*pS_size;
                                }
                                /* Persistently append w to the local frontier
                                 * (logged per the selected logging scheme). */
                                mcsim_tx_begin();
#ifdef BASELINE
                                mcsim_log_begin();
                                //mcsim_skip_instrs_begin();
#ifdef UNDOLOG
                                undolog_pS[pCount] = pS[pCount];
#endif // UNDOLOG
#ifdef REDOLOG
                                redolog_pS[pCount] = w;
#endif // REDOLOG
                                //mcsim_skip_instrs_end();
                                mcsim_mem_fence();
                                mcsim_log_end();
                                mcsim_mem_fence();
#endif // BASELINE
                                pS[pCount++] = w;
                                mcsim_tx_end();
#ifdef CLWB
                                mcsim_clwb( &( pS[pCount-1] ) );
#endif // CLWB
                            }
#ifdef _OPENMP
                            omp_unset_lock(&vLock[w]);
                        }
#endif
                    }

                    // make sure undolog and redolog data structures are not discarded by compiler
                    mcsim_skip_instrs_begin();
#ifdef UNDOLOG
                    printf("%d\n", (int)(sizeof undolog_pS));
#endif // UNDOLOG
#ifdef REDOLOG
                    printf("%d\n", (int)(sizeof redolog_pS));
#endif // REDOLOG
                    mcsim_skip_instrs_end();
                }

#ifdef _OPENMP
#pragma omp barrier
#endif

                /* Publish per-thread frontier sizes, then thread 0 turns them
                 * into exclusive prefix sums = offsets into S. */
                pSCount[tid+1] = pCount;

#ifdef _OPENMP
#pragma omp barrier
#endif

                if (tid == 0) {
                    pSCount[0] = start[phase_num+1];
                    for(k=1; k<=nthreads; k++) {
                        pSCount[k] = pSCount[k-1] + pSCount[k];
                    }
                    start[phase_num+2] = pSCount[nthreads];
                    count = pSCount[nthreads];
                    phase_num++;
                }

#ifdef _OPENMP
#pragma omp barrier
#endif

                mcsim_skip_instrs_begin();
#ifdef UNDOLOG
                VERT_T *undolog_S;
                undolog_S = (VERT_T *) malloc(n*sizeof(VERT_T));
#endif // UNDOLOG
#ifdef REDOLOG
                VERT_T *redolog_S;
                redolog_S = (VERT_T *) malloc(n*sizeof(VERT_T));
#endif // REDOLOG
                mcsim_skip_instrs_end();

                /* Copy this thread's local frontier into its slice of S
                 * (again under the persistence-logging instrumentation). */
                for (k = pSCount[tid]; k < pSCount[tid+1]; k++) {
                    mcsim_tx_begin();
#ifdef BASELINE
                    mcsim_log_begin();
                    //mcsim_skip_instrs_begin();
#ifdef UNDOLOG
                    undolog_S[k] = S[k];
#endif // UNDOLOG
#ifdef REDOLOG
                    redolog_S[k] = pS[k-pSCount[tid]];
#endif // REDOLOG
                    //mcsim_skip_instrs_end();
                    mcsim_mem_fence();
                    mcsim_log_end();
                    mcsim_mem_fence();
#endif // BASELINE
                    S[k] = pS[k-pSCount[tid]];
                    mcsim_tx_end();
#ifdef CLWB
                    mcsim_clwb( &( S[k] ) );
#endif // CLWB
                }

                // make sure undolog and redolog data structures are not discarded by compiler
                mcsim_skip_instrs_begin();
#ifdef UNDOLOG
                printf("%d\n", (int)(sizeof undolog_S));
#endif // UNDOLOG
#ifdef REDOLOG
                printf("%d\n", (int)(sizeof redolog_S));
#endif // REDOLOG
                mcsim_skip_instrs_end();

#ifdef _OPENMP
#pragma omp barrier
#endif
            }

            /* End of search */
            mcsim_skip_instrs_begin();
            if (tid == 0) {
                fprintf(stderr, "Search from <%ld, %ld>, number of vertices visited:"
                        " %ld\n", (long) S[0], (long) S[1], (long) count);
            }
        } /* End of outer loop */

        free(pS);

#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
        for (i=0; i<n; i++) {
            omp_destroy_lock(&vLock[i]);
        }
#pragma omp barrier
#endif

        if (tid == 0) {
            free(S);
            free(start);
            free(visited);
            free(pSCount);
#ifdef _OPENMP
            free(vLock);
#endif
        }

#ifdef _OPENMP
    }
#endif

    elapsed_time = get_seconds() - elapsed_time;
    mcsim_skip_instrs_end();
    return elapsed_time;
}
graphcoloring.c
#include <omp.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>

/*
 * Parallel greedy graph coloring (speculative color-then-detect-conflicts
 * scheme): assign() colors partitions of vertices concurrently with the
 * lowest color not used by a neighbor, detectConflicts() finds adjacent
 * vertices that ended up with the same color, and colorGraph() iterates
 * until no conflicts remain.
 */

struct node{
    int color;              // 0 = uncolored; valid colors start at 1 (see setColor)
    int vertex; // unique id
    struct node* next;
};

struct graph{
    int numVertices;
    struct node** adjLists; // stores adjacent nodes to a given node
    struct node** adjListsCopy; // we can modify this one
    struct node** listOfNodes; // stores node to be accessible by index, alternatively could've been head of adjLists
};

/**
 * Creates a node
 * @param v The label for the node
 * @return Pointer to the created node
 */
struct node* createNode(int v){
    struct node* newNode = malloc(sizeof(struct node));
    newNode->color = 0; // not needed since default is 0, but just being explicit
    // NOTE(review): malloc does NOT zero memory, so this assignment IS
    // needed — the "default is 0" assumption only holds for calloc.
    newNode->next = NULL;
    newNode->vertex = v;
    return newNode;
}

/**
 * Creates a graph
 * @param vertices # of Vertices in our graph
 * @return Pointer to the created graph
 */
struct graph* createGraph(int vertices){
    struct graph* newGraph = malloc(sizeof(struct graph));
    newGraph->numVertices = vertices;
    newGraph->adjLists = malloc(vertices*sizeof(struct node*));
    newGraph->adjListsCopy = malloc(vertices*sizeof(struct node*));
    newGraph->listOfNodes = malloc(vertices*sizeof(struct node*));
    for (int i = 0; i < vertices; i++){
        newGraph->adjLists[i] = NULL;
        newGraph->adjListsCopy[i] = NULL;
        newGraph->listOfNodes[i] = createNode(i);
    }
    return newGraph;
}

/**
 * Add edge between source and dest
 * If an edge already exists then no edge is created
 * @param graph Graph that we want to add an edge in
 * @param source The source node
 * @param dest The destination node which might end up being different if an edge already existed
 * @return 0 if edge wasn't created (false) else 1 (true)
 */
int addEdge(struct graph* graph, int source, int dest) {
    // edge case (no self loops/edges allowed)
    if (source == dest) return 0;

    struct node* srcNode = graph->adjLists[source];
    // NOTE(review): the outer for loop is redundant — srcNode is NULL after
    // the first pass of the while, so iterations 2..numVertices do nothing.
    for (int i = 0; i < graph->numVertices; i++){
        while(srcNode){
            if (srcNode->vertex == dest){
                return 0;
            }
            srcNode = srcNode->next;
        }
    }

    // We get here when there's no edge between source and dest
    // We need to add an edge from both ways in order for the program to work properly
    // Even though we're adding "2 edges", this still only counts as 1 edge from the user's POV

    // Add edge from source to dest
    // NOTE(review): newNode->next is reassigned after the node was already
    // linked into adjLists, so adjLists[source] and adjListsCopy[source]
    // end up sharing the SAME nodes and the same chain — adjListsCopy is an
    // alias, not an independent copy. If an independent "scratch" list was
    // intended ("we can modify this one"), a second createNode(dest) is
    // needed for the copy list.
    struct node* newNode = createNode(dest);
    newNode->next = graph->adjLists[source];
    graph->adjLists[source] = newNode;
    newNode->next = graph->adjListsCopy[source];
    graph->adjListsCopy[source] = newNode;

    // Add edge from dest to source
    newNode = createNode(source);
    newNode->next = graph->adjLists[dest];
    graph->adjLists[dest] = newNode;
    newNode->next = graph->adjListsCopy[dest];
    graph->adjListsCopy[dest] = newNode;

    return 1;
}

/**
 * Prints the graph
 * @param graph to be printed
 */
void printGraph(struct graph* graph) {
    int v;
    printf("\n Printing the graph");
    for (v = 0; v < graph->numVertices; v++) {
        struct node* temp = graph->adjLists[v];
        printf("\n Vertex %d has an edge with the following vertices: ", v);
        while (temp) {
            printf("%d, ", temp->vertex);
            temp = temp->next;
        }
    }
    printf("\n\n");
}

/** 4->2->6->1
 * Sets color of node to lowest available color
 * NOTE: This might end up conflicting. This is ok.
 * This is why we have DetectConflicts()
 * @param adjList The adjacency list of the vertex that we want to color
 * @param vertex The vertex number that represents the vertex we want to color
 */
void setColor(struct node* adjList, struct graph* graph, int vertex){
    int color = 1;
    struct node* adjNodes = adjList;
    struct node* node = graph->listOfNodes[vertex];
    // printf("vertex current color %d \n", node->color);
    // Iterate through the adjacent list.  First-fit with restart: whenever
    // the candidate color collides with a neighbor, bump the color and
    // rescan the whole list from the beginning (worst case O(deg^2)).
    while(adjNodes){
        // printf("current checking for %d \n", graph->listOfNodes[adjNodes->vertex]->color);
        if (graph->listOfNodes[adjNodes->vertex]->color == color){
            color++; // check for next possible min color
            adjNodes = adjList; // reset
        } else {
            adjNodes = adjNodes->next;
        }
    }
    // printf("about to change value of node to %d \n \n", color);
    node->color = color;
}

/**
 * Partition nodes among the threads
 * @param threads   number of OpenMP threads to use
 * @param nodes     total vertex count (used only when firstTime)
 * @param adjLists  when firstTime: all adjacency lists indexed by vertex;
 *                  otherwise: the conflict list returned by detectConflicts()
 * @param graph     the graph being colored
 * @param firstTime nonzero on the first coloring pass
 */
void assign(int threads, int nodes, struct node** adjLists, struct graph* graph, int firstTime) {
    // if firstTime then we treat adjLists as all the adjacent lists
    if (firstTime) {
        int partitionSize = nodes / threads;
        int carryOver = nodes % threads; // this extra work will be done by our final thread
        omp_set_num_threads(threads);
        int threadID, start, end;
#pragma omp parallel private(threadID, start, end)
        {
            threadID = omp_get_thread_num();
            printf("threadID is %d, part size is %d carry over is %d\n", threadID, partitionSize, carryOver);
            start = threadID * partitionSize; // inclusive
            if (threadID == 0) end = start + partitionSize; // thread 0 is special case bc it will mess up with multiplications
            else {
                end = start;
                end += (threadID == threads - 1) ? partitionSize + carryOver : partitionSize; // exclusive
            }
            printf("start is %d and end is %d \n", start, end);
            // NOTE(review): this is a `parallel for` NESTED inside the
            // enclosing `parallel` region; with nested parallelism disabled
            // (the OpenMP default) each thread runs its own chunk serially,
            // which is presumably what was intended — a plain `for` would
            // express that directly.
#pragma omp parallel for
            for (int v = start; v < end; v++) {
                struct node *temp = adjLists[v];
                // printf("v is currently %d\n", v);
                setColor(temp, graph, v);
            }
        }
    } else {
        // if it's not our first time, then we know that adjacent lists contains the vertices with conflicts
        // we need to get the size of the list before we can partition
        // NOTE(review): this walks adjLists[0] as a linked list, but
        // detectConflicts() fills newAdjLists[0..i-1] as a flat ARRAY of
        // node pointers — the size computed here follows the nodes' ->next
        // chain instead of counting array slots. Verify against
        // detectConflicts() before relying on this path.
        int size = 0;
        struct node *traversal = adjLists[0];
        while (traversal) {
            size++;
            traversal = traversal->next;
        }
        int partitionSize = size / threads;
        int carryOver = size % threads; // this extra work will be done by our final thread
        omp_set_num_threads(threads);
        int threadID, start, end;
#pragma omp parallel private(threadID, start, end)
        {
            threadID = omp_get_thread_num();
            start = threadID * partitionSize; // inclusive
            end = start;
            end += (threadID == threads - 1) ? partitionSize + carryOver : partitionSize; // exclusive
#pragma omp parallel for
            for (int v = start; v < end; v++) {
                struct node *temp = adjLists[v];
                // we extract the vertex # from our list of conflicts
                // with the vertex #, we extract it's adjList and call setColor accordingly
                setColor(graph->adjLists[temp->vertex], graph, temp->vertex);
            }
        }
    }
}

/**
 * Detects conflicts among the given nodes
 * @return list of vertices that had conflicts
 */
struct node** detectConflicts(int threads, int nodes, struct node** adjLists, struct graph* graph) {
    int partitionSize = nodes / threads;
    int carryOver = nodes % threads; // this extra work will be done by our final thread
    struct node **newAdjLists = malloc(nodes * sizeof(struct node *));
    // NOTE(review): this zeroes the INPUT array adjLists (destroying the
    // caller's lists) while leaving the freshly malloc'd newAdjLists
    // uninitialized — almost certainly meant `newAdjLists[i] = NULL;`.
    for (int i = 0; i < nodes; i++)
        adjLists[i] = NULL;
    int i = 0;
    omp_set_num_threads(threads);
    int threadID, start, end;
    int foundConflict = 0;
#pragma omp parallel shared(i, foundConflict) private(threadID, start, end)
    {
        threadID = omp_get_thread_num();
        start = threadID * partitionSize; // inclusive
        // NOTE(review): for every thread except the last, end == start
        // (both are partitionSize * threadID), so those threads scan
        // nothing; compare with assign(), where end = start + partitionSize.
        end = (threadID == threads - 1) ? partitionSize * threadID + carryOver : partitionSize * threadID; // exclusive
        // NOTE(review): a bare nested `#pragma omp parallel` (no `for`)
        // makes every nested thread execute the ENTIRE loop; combined with
        // the adjLists[v] lists having just been NULLed above, the body's
        // while loop never runs — confirm the intended semantics.
#pragma omp parallel
        for (int v = start; v < end; v++) {
            struct node *currNode = graph->listOfNodes[v]; // aka V
            struct node *adjNodes = adjLists[v]; // aka U
            int color = currNode->color;
            while (adjNodes) {
#pragma omp critical
                {
                    if (adjNodes->color == color && adjNodes->vertex < v) {
                        // newAdjLists[i++] = adjLists[v];
                        newAdjLists[i++] = currNode;
                        foundConflict = 1;
                        v++; // go to next, same as break but can't have break inside this section
                    }
                }
                adjNodes = adjNodes->next;
            }
        }
    }
    // NOTE(review): newAdjLists is leaked when no conflict is found, and
    // also leaked by the caller on every subsequent round.
    return foundConflict ? newAdjLists : NULL;
}

/* Top-level driver: speculative color / detect / re-color loop. */
void colorGraph(int threads, int nodes, struct graph* graph){
    struct node** adjLists = graph->adjListsCopy;
    int firstIteration = 1;
    while(adjLists){
        assign(threads,nodes,adjLists,graph, firstIteration);
        firstIteration = 0;
        adjLists = detectConflicts(threads, nodes, adjLists, graph);
    }
}

/**
 * Finds max degree in graph (not timed)
 * @param graph
 */
void findMaxDegree(struct graph* graph){
    int v;
    int potentialMax = 0;
    int max = 0;
    for (v = 0; v < graph->numVertices; v++) {
        struct node* temp = graph->adjLists[v];
        while (temp) {
            potentialMax++;
            temp = temp->next;
        }
        max = potentialMax > max ? potentialMax : max;
        potentialMax = 0;
    }
    printf("The max degree is: %d\n", max);
}

/**
 * Finds max color in graph (not timed)
 * @param graph
 */
void findMaxColor(struct graph* graph){
    int v;
    int max = 0;
    for (v = 0; v < graph->numVertices; v++) {
        struct node* temp = graph->listOfNodes[v]; // could just check nodes, doesn't matter, not timed
        max = temp->color > max ? temp->color : max;
        temp = temp->next;   // NOTE(review): dead store — temp is reassigned next iteration
    }
    printf("The max color is: %d\n", max);
}

int main(int argc,char *argv[]) {
    // Assumes n > 3, t > 0, and 0 < e <= n(n-1)/2
    if (argc != 4) {
        printf("This program requires 3 arguments: nodes, edges, threads\n");
        return -1;
    }
    int nodes;
    int edges;
    int threads;
    nodes = atoi(argv[1]);
    edges = atoi(argv[2]);
    threads = atoi(argv[3]);
    struct graph* g = createGraph(nodes);
    // srand(time(NULL)); // seed
    srand(10);  // fixed seed for reproducible runs
    int i = 0;
    while (i < edges){
        // i++ if edge got created
        if(addEdge(g,rand() % nodes, rand() % nodes)) i++;
    }
    // printGraph(g);

    // the code logic for timing in C is from https://www.geeksforgeeks.org/how-to-measure-time-taken-by-a-program-in-c/
    // NOTE(review): clock() measures CPU time summed over all threads, so
    // this overstates wall time for parallel runs — omp_get_wtime() would
    // measure elapsed wall-clock time instead.
    clock_t t;
    t = clock();
    colorGraph(threads,nodes,g);
    t = clock() - t;
    double time_taken = ((double)t)/CLOCKS_PER_SEC;
    printf("\n Coloring the graph took %f seconds \n", time_taken);

    findMaxDegree(g);
    findMaxColor(g);
    // max color varies but not by a lot.
    // this is perfectly acceptable because our algorithm is heuristic.
}
visualize.c
/*****************************************************************************
 * x264: h264 encoder
 *****************************************************************************
 * Copyright (C) 2005 Tuukka Toivonen <tuukkat@ee.oulu.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *****************************************************************************/

/*
 * Some explanation of the symbols used:
 * Red/pink: intra block
 * Blue: inter block
 * Green: skip block
 * Yellow: B-block (not visualized properly yet)
 *
 * Motion vectors have black dot at their target (ie. at the MB center),
 * instead of arrowhead. The black dot is enclosed in filled diamond with radius
 * depending on reference frame number (one frame back = zero width, normal case).
 *
 * The intra blocks have generally lines drawn perpendicular
 * to the prediction direction, so for example, if there is a pink block
 * with horizontal line at the top of it, it is interpolated by assuming
 * luma to be vertically constant.
 * DC predicted blocks have both horizontal and vertical lines,
 * pink blocks with a diagonal line are predicted using the planar function.
 */

#include "common.h"
#include "visualize.h"
#include "display.h"
#include <omp.h>

/* Per-macroblock snapshot of everything the visualizer needs to draw:
 * captured by x264_visualize_mb() during encoding, consumed later by
 * x264_visualize_show(). */
typedef struct {
    int i_type;
    int i_partition;
    int i_sub_partition[4];
    int i_intra16x16_pred_mode;
    int intra4x4_pred_mode[4][4];
    int8_t ref[2][4][4];                   /* [list][y][x] */
    int16_t mv[2][4][4][2];                /* [list][y][x][mvxy] */
} visualize_t;

/* {{{ [fold] char *get_string(const stringlist_t *sl, int entries, int code) */
/* Return string from stringlist corresponding to the given code */

#define GET_STRING(sl, code)  get_string((sl), sizeof(sl)/sizeof(*(sl)), code)

typedef struct {
    int code;
    char *string;
} stringlist_t;

static char *get_string(const stringlist_t *sl, int entries, int code)
{
    int i;

    for (i=0; i<entries; i++) {
        if (sl[i].code==code)
            break;
    }
    /* Unknown codes map to "?", never to NULL. */
    return (i>=entries) ? "?" : sl[i].string;
}
/* }}} */

/* {{{ [fold] void mv(int x0, int y0, int16_t dmv[2], int ref, int zoom, char *col) */
/* Plot motion vector: a line from (x0,y0) in direction dmv (quarter-pel
 * units, scaled by zoom), a black dot at the origin, and a diamond of
 * radius `ref` around it encoding the reference-frame distance. */
static void mv(int x0, int y0, int16_t dmv[2], int ref, int zoom, char *col)
{
    int dx = dmv[0];
    int dy = dmv[1];
    int i;

    dx = (dx * zoom + 2) >> 2;               /* Quarter pixel accurate MVs */
    dy = (dy * zoom + 2) >> 2;
    disp_line(0, x0, y0, x0+dx, y0+dy);
    for (i=1; i<ref; i++){
        disp_line(0, x0, y0-i, x0+i, y0);
        disp_line(0, x0+i, y0, x0, y0+i);
        disp_line(0, x0, y0+i, x0-i, y0);
        disp_line(0, x0-i, y0, x0, y0-i);
    }
    disp_setcolor("black");
    disp_point(0, x0, y0);
    disp_setcolor(col);
}
/* }}} */

/* {{{ [fold] void x264_visualize_init( x264_t *h ) */
/* Allocate one visualize_t slot per macroblock of the frame. */
void x264_visualize_init( x264_t *h )
{
    int mb = h->sps->i_mb_width * h->sps->i_mb_height;
    h->visualize = x264_malloc(mb * sizeof(visualize_t));
}
/* }}} */

/* {{{ [fold] void x264_visualize_mb( x264_t *h ) */
/* Snapshot the current macroblock's mode decisions into h->visualize.
 * Must be called while h->mb still describes the MB at h->mb.i_mb_xy. */
void x264_visualize_mb( x264_t *h )
{
    visualize_t *v = (visualize_t*)h->visualize + h->mb.i_mb_xy;
    int i, l, x, y;

    /* Save all data for the MB what we need for drawing the visualization */
    v->i_type = h->mb.i_type;
    v->i_partition = h->mb.i_partition;
    for (i=0; i<4; i++)
        v->i_sub_partition[i] = h->mb.i_sub_partition[i];
    for (y=0; y<4; y++)
        for (x=0; x<4; x++)
            v->intra4x4_pred_mode[y][x] =
                h->mb.cache.intra4x4_pred_mode[X264_SCAN8_0+y*8+x];
    for (l=0; l<2; l++)
        for (y=0; y<4; y++)
            for (x=0; x<4; x++) {
                for (i=0; i<2; i++) {
                    v->mv[l][y][x][i] = h->mb.cache.mv[l][X264_SCAN8_0+y*8+x][i];
                }
                v->ref[l][y][x] = h->mb.cache.ref[l][X264_SCAN8_0+y*8+x];
            }
    v->i_intra16x16_pred_mode = h->mb.i_intra16x16_pred_mode;
}
/* }}} */

/* {{{ [fold] void x264_visualize_close( x264_t *h ) */
void x264_visualize_close( x264_t *h )
{
    x264_free(h->visualize);
}
/* }}} */

/* {{{ [fold] void x264_visualize_show( x264_t *h ) */
/* Display visualization (block types, MVs) of the encoded frame */
/* FIXME: B-type MBs not handled yet properly */
void x264_visualize_show( x264_t *h )
{
    int mb_xy;
    static const stringlist_t mb_types[] = {
        /* Block types marked as NULL will not be drawn */
        { I_4x4   , "red" },
        { I_8x8   , "#ff5640" },
        { I_16x16 , "#ff8060" },
        { I_PCM   , "violet" },
        { P_L0    , "SlateBlue" },
        { P_8x8   , "blue" },
        { P_SKIP  , "green" },
        { B_DIRECT, "yellow" },
        { B_L0_L0 , "yellow" },
        { B_L0_L1 , "yellow" },
        { B_L0_BI , "yellow" },
        { B_L1_L0 , "yellow" },
        { B_L1_L1 , "yellow" },
        { B_L1_BI , "yellow" },
        { B_BI_L0 , "yellow" },
        { B_BI_L1 , "yellow" },
        { B_BI_BI , "yellow" },
        { B_8x8   , "yellow" },
        { B_SKIP  , "yellow" },
    };
    static const int waitkey = 1;     /* Wait for enter after each frame */
    static const int drawbox = 1;     /* Draw box around each block */
    static const int borders = 0;     /* Display extrapolated borders outside frame */
    static const int zoom = 2;        /* Zoom factor */

    static const int pad = 32;
    uint8_t *const frame = h->fdec->plane[0];
    const int width = h->param.i_width;
    const int height = h->param.i_height;
    const int stride = h->fdec->i_stride[0];

    if (borders) {
        disp_gray_zoom(0, frame - pad*stride - pad, width+2*pad, height+2*pad,
                       stride, "fdec", zoom);
    } else {
        disp_gray_zoom(0, frame, width, height, stride, "fdec", zoom);
    }

    /* NOTE(review): this parallel for issues disp_setcolor/disp_line/
     * disp_rect calls concurrently.  disp_setcolor followed by drawing
     * calls implies shared drawing state; unless the disp_* layer is
     * thread-safe and per-call color-atomic, iterations race and colors
     * can bleed between macroblocks — confirm against display.h. */
    #pragma omp parallel for
    for( mb_xy = 0; mb_xy < h->sps->i_mb_width * h->sps->i_mb_height; mb_xy++ )
    {
        visualize_t *const v = (visualize_t*)h->visualize + mb_xy;
        const int mb_y = mb_xy / h->sps->i_mb_width;
        const int mb_x = mb_xy % h->sps->i_mb_width;
        char *const col = GET_STRING(mb_types, v->i_type);
        int x = mb_x*16*zoom;
        int y = mb_y*16*zoom;
        int l = 0;
        unsigned int i, j;

        /* NOTE(review): get_string() returns "?" for unknown codes, never
         * NULL, so this skip branch appears unreachable as written (the
         * table comment suggests NULL entries were once possible). */
        if (col==NULL)
            continue;

        if (borders) {
            x += pad*zoom;
            y += pad*zoom;
        }
        disp_setcolor(col);
        if (drawbox) disp_rect(0, x, y, x+16*zoom-1, y+16*zoom-1);

        if (v->i_type==P_L0 || v->i_type==P_8x8 || v->i_type==P_SKIP) {

            /* Predicted (inter) mode, with motion vector */
            if (v->i_partition==D_16x16 || v->i_type==P_SKIP) {
                mv(x+8*zoom, y+8*zoom, v->mv[l][0][0], v->ref[l][0][0], zoom, col);
            }
            if (v->i_partition==D_16x8) {
                if (drawbox) disp_rect(0, x, y, x+16*zoom, y+8*zoom);
                mv(x+8*zoom, y+4*zoom, v->mv[l][0][0], v->ref[l][0][0], zoom, col);
                if (drawbox) disp_rect(0, x, y+8*zoom, x+16*zoom, y+16*zoom);
                mv(x+8*zoom, y+12*zoom, v->mv[l][2][0], v->ref[l][2][0], zoom, col);
            }
            if (v->i_partition==D_8x16) {
                if (drawbox) disp_rect(0, x, y, x+8*zoom, y+16*zoom);
                mv(x+4*zoom, y+8*zoom, v->mv[l][0][0], v->ref[l][0][0], zoom, col);
                if (drawbox) disp_rect(0, x+8*zoom, y, x+16*zoom, y+16*zoom);
                mv(x+12*zoom, y+8*zoom, v->mv[l][0][2], v->ref[l][0][2], zoom, col);
            }
            if (v->i_partition==D_8x8) {
                for (i=0; i<2; i++) for (j=0; j<2; j++) {
                    int sp = v->i_sub_partition[i*2+j];
                    const int x0 = x + j*8*zoom;
                    const int y0 = y + i*8*zoom;
                    l = x264_mb_partition_listX_table[0][sp] ? 0 : 1; /* FIXME: not tested if this works */
                    if (IS_SUB8x8(sp)) {
                        if (drawbox) disp_rect(0, x0, y0, x0+8*zoom, y0+8*zoom);
                        mv(x0+4*zoom, y0+4*zoom, v->mv[l][2*i][2*j], v->ref[l][2*i][2*j], zoom, col);
                    }
                    if (IS_SUB8x4(sp)) {
                        if (drawbox) disp_rect(0, x0, y0, x0+8*zoom, y0+4*zoom);
                        if (drawbox) disp_rect(0, x0, y0+4*zoom, x0+8*zoom, y0+8*zoom);
                        mv(x0+4*zoom, y0+2*zoom, v->mv[l][2*i][2*j], v->ref[l][2*i][2*j], zoom, col);
                        mv(x0+4*zoom, y0+6*zoom, v->mv[l][2*i+1][2*j], v->ref[l][2*i+1][2*j], zoom, col);
                    }
                    if (IS_SUB4x8(sp)) {
                        if (drawbox) disp_rect(0, x0, y0, x0+4*zoom, y0+8*zoom);
                        if (drawbox) disp_rect(0, x0+4*zoom, y0, x0+8*zoom, y0+8*zoom);
                        mv(x0+2*zoom, y0+4*zoom, v->mv[l][2*i][2*j], v->ref[l][2*i][2*j], zoom, col);
                        mv(x0+6*zoom, y0+4*zoom, v->mv[l][2*i][2*j+1], v->ref[l][2*i][2*j+1], zoom, col);
                    }
                    if (IS_SUB4x4(sp)) {
                        if (drawbox) disp_rect(0, x0, y0, x0+4*zoom, y0+4*zoom);
                        if (drawbox) disp_rect(0, x0+4*zoom, y0, x0+8*zoom, y0+4*zoom);
                        if (drawbox) disp_rect(0, x0, y0+4*zoom, x0+4*zoom, y0+8*zoom);
                        if (drawbox) disp_rect(0, x0+4*zoom, y0+4*zoom, x0+8*zoom, y0+8*zoom);
                        mv(x0+2*zoom, y0+2*zoom, v->mv[l][2*i][2*j], v->ref[l][2*i][2*j], zoom, col);
                        mv(x0+6*zoom, y0+2*zoom, v->mv[l][2*i][2*j+1], v->ref[l][2*i][2*j+1], zoom, col);
                        mv(x0+2*zoom, y0+6*zoom, v->mv[l][2*i+1][2*j], v->ref[l][2*i+1][2*j], zoom, col);
                        mv(x0+6*zoom, y0+6*zoom, v->mv[l][2*i+1][2*j+1], v->ref[l][2*i+1][2*j+1], zoom, col);
                    }
                }
            }
        }

        if (IS_INTRA(v->i_type) || v->i_type==I_PCM) {
            /* Intra coded */
            if (v->i_type==I_16x16) {
                switch (v->i_intra16x16_pred_mode) {
                case I_PRED_16x16_V:
                    disp_line(0, x+2*zoom, y+2*zoom, x+14*zoom, y+2*zoom);
                    break;
                case I_PRED_16x16_H:
                    disp_line(0, x+2*zoom, y+2*zoom, x+2*zoom, y+14*zoom);
                    break;
                case I_PRED_16x16_DC:
                case I_PRED_16x16_DC_LEFT:
                case I_PRED_16x16_DC_TOP:
                case I_PRED_16x16_DC_128:
                    disp_line(0, x+2*zoom, y+2*zoom, x+14*zoom, y+2*zoom);
                    disp_line(0, x+2*zoom, y+2*zoom, x+2*zoom, y+14*zoom);
                    break;
                case I_PRED_16x16_P:
                    disp_line(0, x+2*zoom, y+2*zoom, x+8*zoom, y+8*zoom);
                    break;
                }
            }
            if (v->i_type==I_4x4 || v->i_type==I_8x8) {
                /* I_8x8 reuses the 4x4 mode array with stride 2 (di). */
                const int di = v->i_type==I_8x8 ? 2 : 1;
                const int zoom2 = zoom * di;
                for (i=0; i<4; i+=di) for (j=0; j<4; j+=di) {
                    const int x0 = x + j*4*zoom;
                    const int y0 = y + i*4*zoom;
                    if (drawbox) disp_rect(0, x0, y0, x0+4*zoom2, y0+4*zoom2);
                    switch (v->intra4x4_pred_mode[i][j]) {
                    case I_PRED_4x4_V:       /* Vertical */
                        disp_line(0, x0+0*zoom2, y0+1*zoom2, x0+4*zoom2, y0+1*zoom2);
                        break;
                    case I_PRED_4x4_H:       /* Horizontal */
                        disp_line(0, x0+1*zoom2, y0+0*zoom2, x0+1*zoom2, y0+4*zoom2);
                        break;
                    case I_PRED_4x4_DC:      /* DC, average from top and left sides */
                    case I_PRED_4x4_DC_LEFT:
                    case I_PRED_4x4_DC_TOP:
                    case I_PRED_4x4_DC_128:
                        disp_line(0, x0+1*zoom2, y0+1*zoom2, x0+4*zoom2, y0+1*zoom2);
                        disp_line(0, x0+1*zoom2, y0+1*zoom2, x0+1*zoom2, y0+4*zoom2);
                        break;
                    case I_PRED_4x4_DDL:     /* Topright-bottomleft */
                        disp_line(0, x0+0*zoom2, y0+0*zoom2, x0+4*zoom2, y0+4*zoom2);
                        break;
                    case I_PRED_4x4_DDR:     /* Topleft-bottomright */
                        disp_line(0, x0+0*zoom2, y0+4*zoom2, x0+4*zoom2, y0+0*zoom2);
                        break;
                    case I_PRED_4x4_VR:      /* Mix of topleft-bottomright and vertical */
                        disp_line(0, x0+0*zoom2, y0+2*zoom2, x0+4*zoom2, y0+1*zoom2);
                        break;
                    case I_PRED_4x4_HD:      /* Mix of topleft-bottomright and horizontal */
                        disp_line(0, x0+2*zoom2, y0+0*zoom2, x0+1*zoom2, y0+4*zoom2);
                        break;
                    case I_PRED_4x4_VL:      /* Mix of topright-bottomleft and vertical */
                        disp_line(0, x0+0*zoom2, y0+1*zoom2, x0+4*zoom2, y0+2*zoom2);
                        break;
                    case I_PRED_4x4_HU:      /* Mix of topright-bottomleft and horizontal */
                        disp_line(0, x0+1*zoom2, y0+0*zoom2, x0+2*zoom2, y0+4*zoom2);
                        break;
                    }
                }
            }
        }
    }

    disp_sync();

    if (waitkey)
        getchar();
}
/* }}} */

//EOF
piTeste.c
// #include <stdio.h> // #include <omp.h> // #include <time.h> // #include <stdlib.h> // long long num_passos = 10; long long num_passos = 10000000000; double passo; int main(int argc, char** argv){ long long i; double x, pi, soma=0.0; passo = 1.0/(double)num_passos; #pragma omp parallel for private(x, i) shared(passo) reduction(+:soma) num_threads(6) for(i=0; i < num_passos; i++){ // printf("%lld", i); x = (i + 0.5)*passo; soma += 4.0/(1.0 + x*x); } pi = soma*passo; printf("O valor de PI é: %f\n", pi); return 0; }
GB_ijsort.c
//------------------------------------------------------------------------------
// GB_ijsort: sort an index array I and remove duplicates
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// Sort an index array and remove duplicates.  In MATLAB notation:

/*
    [I1 I1k] = sort (I) ;
    Iduplicate = [(I1 (1:end-1) == I1 (2:end)), false] ;
    I2  = I1  (~Iduplicate) ;
    I2k = I1k (~Iduplicate) ;
*/

#include "GB_ij.h"
#include "GB_sort.h"

// Free all workspace.  GB_FREE_MEMORY is assumed to tolerate repeated calls
// on the same pointer (i.e. it NULLs its argument) — W0/W1 may already have
// been freed by the mergesort branch when this runs on a later error path;
// confirm against the GB_FREE_MEMORY definition.
#define GB_FREE_WORK                                    \
{                                                       \
    GB_FREE_MEMORY (Count, ntasks+1, sizeof (int64_t)) ;\
    GB_FREE_MEMORY (W0,  ni, sizeof (GrB_Index)) ;      \
    GB_FREE_MEMORY (W1,  ni, sizeof (GrB_Index)) ;      \
    GB_FREE_MEMORY (I1,  ni, sizeof (GrB_Index)) ;      \
    GB_FREE_MEMORY (I1k, ni, sizeof (GrB_Index)) ;      \
}

GrB_Info GB_ijsort
(
    const GrB_Index *restrict I, // size ni, where ni > 1 always holds
    int64_t *restrict p_ni,      // input: size of I, output: # of indices in I2
    GrB_Index *restrict *p_I2,   // output array of size ni2, where I2 [0..ni2-1]
                                 // contains the sorted indices with duplicates removed.
    GrB_Index *restrict *p_I2k,  // output array of size ni2
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (I != NULL) ;
    ASSERT (p_ni != NULL) ;
    ASSERT (p_I2 != NULL) ;
    ASSERT (p_I2k != NULL) ;

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GrB_Index *restrict I1  = NULL ;
    GrB_Index *restrict I1k = NULL ;
    GrB_Index *restrict I2  = NULL ;
    GrB_Index *restrict I2k = NULL ;
    int64_t *restrict W0 = NULL ;
    int64_t *restrict W1 = NULL ;
    int64_t ni = *p_ni ;
    ASSERT (ni > 1) ;
    int64_t *restrict Count = NULL ;        // size ntasks+1
    int ntasks = 0 ;

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (ni, chunk, nthreads_max) ;

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    GB_MALLOC_MEMORY (I1,  ni, sizeof (GrB_Index)) ;
    GB_MALLOC_MEMORY (I1k, ni, sizeof (GrB_Index)) ;
    if (I1 == NULL || I1k == NULL)
    {
        // out of memory
        GB_FREE_WORK ;
        return (GB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // copy I into I1 and construct I1k
    //--------------------------------------------------------------------------

    GB_memcpy (I1, I, ni * sizeof (GrB_Index), nthreads) ;

    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < ni ; k++)
    {
        // the key is selected so that the last duplicate entry comes first in
        // the sorted result.  It must be adjusted later, so that the kth entry
        // has a key equal to k.  (Key ni-k is strictly decreasing in k, so
        // ties on the index sort ascending by key, i.e. descending by k.)
        I1k [k] = (ni-k) ;
    }

    //--------------------------------------------------------------------------
    // sort [I1 I1k]
    //--------------------------------------------------------------------------

    if (nthreads == 1)
    {

        //----------------------------------------------------------------------
        // sequential quicksort
        //----------------------------------------------------------------------

        GB_qsort_2 ((int64_t *) I1, (int64_t *) I1k, ni) ;

    }
    else
    {

        //----------------------------------------------------------------------
        // parallel mergesort
        //----------------------------------------------------------------------

        // W0/W1 are scratch buffers for the mergesort, freed immediately.
        GB_MALLOC_MEMORY (W0, ni, sizeof (int64_t)) ;
        GB_MALLOC_MEMORY (W1, ni, sizeof (int64_t)) ;
        if (W0 == NULL || W1 == NULL)
        {
            // out of memory
            GB_FREE_WORK ;
            return (GB_OUT_OF_MEMORY) ;
        }
        GB_msort_2 ((int64_t *) I1, (int64_t *) I1k, W0, W1, ni, nthreads) ;
        GB_FREE_MEMORY (W0, ni, sizeof (int64_t)) ;
        GB_FREE_MEMORY (W1, ni, sizeof (int64_t)) ;
    }

    //--------------------------------------------------------------------------
    // determine number of tasks to create
    //--------------------------------------------------------------------------

    ntasks = (nthreads == 1) ? 1 : (32 * nthreads) ;
    ntasks = GB_IMIN (ntasks, ni) ;
    ntasks = GB_IMAX (ntasks, 1) ;

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    GB_MALLOC_MEMORY (Count, ntasks+1, sizeof (int64_t)) ;
    if (Count == NULL)
    {
        // out of memory
        GB_FREE_WORK ;
        return (GB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // count unique entries in I1
    //--------------------------------------------------------------------------

    // An entry I1[k] is unique iff it differs from its predecessor; task 0
    // also counts I1[0] (via my_count = 1), which is never a duplicate.
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (int tid = 0 ; tid < ntasks ; tid++)
    {
        int64_t kfirst, klast, my_count = (tid == 0) ? 1 : 0 ;
        GB_PARTITION (kfirst, klast, ni, tid, ntasks) ;
        for (int64_t k = GB_IMAX (kfirst,1) ; k < klast ; k++)
        {
            if (I1 [k-1] != I1 [k])
            {
                my_count++ ;
            }
        }
        Count [tid] = my_count ;
    }

    GB_cumsum (Count, ntasks, NULL, 1) ;
    int64_t ni2 = Count [ntasks] ;

    //--------------------------------------------------------------------------
    // allocate the result I2
    //--------------------------------------------------------------------------

    GB_MALLOC_MEMORY (I2 , ni2, sizeof (GrB_Index)) ;
    GB_MALLOC_MEMORY (I2k, ni2, sizeof (GrB_Index)) ;
    if (I2 == NULL || I2k == NULL)
    {
        // out of memory
        GB_FREE_WORK ;
        GB_FREE_MEMORY (I2 , ni2, sizeof (GrB_Index)) ;
        GB_FREE_MEMORY (I2k, ni2, sizeof (GrB_Index)) ;
        return (GB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // construct the new list I2 from I1, removing duplicates
    //--------------------------------------------------------------------------

    // Each task writes its unique entries starting at its cumulative-count
    // offset Count[tid].  The key is adjusted back (ni - I1k[k]) so I2k
    // holds the ORIGINAL position in I of the surviving entry.
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (int tid = 0 ; tid < ntasks ; tid++)
    {
        int64_t kfirst, klast, k2 = Count [tid] ;
        GB_PARTITION (kfirst, klast, ni, tid, ntasks) ;
        if (tid == 0)
        {
            // the first entry in I1 is never a duplicate
            I2  [k2] = I1 [0] ;
            I2k [k2] = (ni - I1k [0]) ;
            k2++ ;
        }
        for (int64_t k = GB_IMAX (kfirst,1) ; k < klast ; k++)
        {
            if (I1 [k-1] != I1 [k])
            {
                I2  [k2] = I1 [k] ;
                I2k [k2] = ni - I1k [k] ;
                k2++ ;
            }
        }
    }

    //--------------------------------------------------------------------------
    // check result: compare with single-pass, single-threaded algorithm
    //--------------------------------------------------------------------------

    #ifdef GB_DEBUG
    {
        // This check overwrites I1/I1k in place; that is safe because both
        // are freed by GB_FREE_WORK immediately afterwards.
        int64_t ni1 = 1 ;
        I1k [0] = ni - I1k [0] ;
        for (int64_t k = 1 ; k < ni ; k++)
        {
            if (I1 [ni1-1] != I1 [k])
            {
                I1  [ni1] = I1 [k] ;
                I1k [ni1] = ni - I1k [k] ;
                ni1++ ;
            }
        }
        ASSERT (ni1 == ni2) ;
        for (int64_t k = 0 ; k < ni1 ; k++)
        {
            ASSERT (I1 [k] == I2 [k]) ;
            ASSERT (I1k [k] == I2k [k]) ;
        }
    }
    #endif

    //--------------------------------------------------------------------------
    // free workspace and return the new sorted list
    //--------------------------------------------------------------------------

    GB_FREE_WORK ;
    *(p_I2 ) = (GrB_Index *) I2 ;
    *(p_I2k) = (GrB_Index *) I2k ;
    *(p_ni ) = (int64_t   ) ni2 ;
    return (GrB_SUCCESS) ;
}
algo.c
/* * algo.c * * Created on: 2011-09-25 * Author: francis */ #define _GNU_SOURCE #include <stdlib.h> #include <stdio.h> #include <inttypes.h> #include <string.h> #include "algo.h" #include "chunk.h" #include "omp.h" int sigma(int n) { return (n + 1) * n; } struct cs { uint64_t checksum; } __attribute__((aligned(64))); int encode_fast(struct chunk *chunk) { // TODO int i; int area = chunk->area; int key = chunk->key; char *data = chunk->data; uint64_t checksum = 0; #pragma omp parallel for private(i) reduction(+:checksum) for (i = 0; i < area; i++) { data[i] = data[i] + key; checksum += data[i]; } chunk->checksum = checksum; return 0; } int encode_slow_a(struct chunk *chunk) { int i, j; uint64_t checksum = 0; #pragma omp parallel for private(i,j) reduction(+:checksum) for (i = 0; i < chunk->height; i++) { for (j = 0; j < chunk->width; j++) { int index = i * chunk->width + j; chunk->data[index] = chunk->data[index] + chunk->key; checksum += chunk->data[index]; } } chunk->checksum = checksum; return 0; } int encode_slow_b(struct chunk *chunk) { int i; int area = chunk->area; int key = chunk->key; char *data = chunk->data; uint64_t* checksums; int n; #pragma omp parallel private(i) { #pragma omp single { n = omp_get_num_threads(); checksums = calloc(n, sizeof(uint64_t)); } #pragma omp barrier int id = omp_get_thread_num(); for (i = id; i < area; i += n) { data[i] = data[i] + key; checksums[id] += data[i]; } #pragma omp barrier } chunk->checksum = 0; for (i = 0; i < n; i++) chunk->checksum += checksums[i]; return 0; } int encode_slow_c(struct chunk *chunk) { int i; int checksum = 0; char *data = chunk->data; int area = chunk->area; int key = chunk->key; #pragma omp parallel for for (i = 0; i < area; i++) { data[i] = data[i] + key; #pragma omp atomic checksum += data[i]; } chunk->checksum = checksum; return 0; } int encode_slow_d(struct chunk *chunk) { int i; int checksum = 0; char *data = chunk->data; int area = chunk->area; int key = chunk->key; #pragma omp parallel for 
for (i = 0; i < area; i++) { data[i] = data[i] + key; #pragma omp critical { checksum += data[i]; } } chunk->checksum = checksum; return 0; } int encode_slow_e(struct chunk *chunk) { int i, j; int checksum = 0; int width = chunk->width; int height = chunk->height; int key = chunk->key; char *data = chunk->data; #pragma omp parallel for private(i,j) reduction(+:checksum) for (i = 0; i < width; i++) { for (j = 0; j < height; j++) { int index = i + j * width; data[index] = data[index] + key; checksum += data[index]; } } chunk->checksum = checksum; return 0; } int encode_slow_f(struct chunk *chunk) { int i; int area = chunk->area; int key = chunk->key; char *data = chunk->data; struct cs* cs; int n; int sig; uint64_t checksum; #pragma omp parallel private(i, checksum) { #pragma omp single { n = omp_get_num_threads(); cs = calloc(n, sizeof(struct cs)); sig = sigma(n); } #pragma omp barrier checksum = 0; int id = omp_get_thread_num(); int start = (int) (((uint64_t)sigma(id)) * area / sig); int end = (int) (((uint64_t)sigma(id + 1)) * area / sig); for (i = start; i < end; i++) { data[i] = data[i] + key; checksum += data[i]; } #pragma omp barrier cs[id].checksum = checksum; } chunk->checksum = 0; for (i = 0; i < n; i++) chunk->checksum += cs[i].checksum; return 0; }
shape.c
/*
 * shape.c — shape-manipulation routines (resize, reshape, squeeze,
 * swapaxes, transpose, ravel, flatten) for PyMicArray objects.
 * NOTE(review): this appears adapted from NumPy's core shape.c with
 * PyArray_* swapped for PyMicArray_* and device-aware memory calls —
 * confirm against upstream when changing semantics.
 */
#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include "structmember.h"

#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define NO_IMPORT_ARRAY
#define PY_ARRAY_UNIQUE_SYMBOL MICPY_ARRAY_API
#include <numpy/arrayobject.h>
#include <numpy/arrayscalars.h>
#include <numpy/npy_math.h>

#include <numpy/npy_3kcompat.h>

#include "npy_config.h"

#define _MICARRAYMODULE
#include "alloc.h"
#include "arrayobject.h"
#include "creators.h"
#include "shape.h"
#include "convert.h"

#include "templ_common.h" /* for npy_mul_with_overflow_intp */
#include "common.h" /* for convert_shape_to_string */

static int
_fix_unknown_dimension(PyArray_Dims *newshape, PyMicArrayObject *arr);

static int
_attempt_nocopy_reshape(PyMicArrayObject *self, int newnd, npy_intp* newdims,
                        npy_intp *newstrides, int is_f_order);

static void
_putzero(int device, char *optr, PyObject *zero, PyArray_Descr *dtype);

/*NUMPY_API
 * Resize (reallocate data). Only works if nothing else is referencing this
 * array and it is contiguous. If refcheck is 0, then the reference count is
 * not checked and assumed to be 1. You still must own this data and have no
 * weak-references and no base object.
 */
NPY_NO_EXPORT PyObject *
PyMicArray_Resize(PyMicArrayObject *self, PyArray_Dims *newshape, int refcheck,
                  NPY_ORDER order)
{
    npy_intp oldsize, newsize;
    int new_nd=newshape->len, k, n, elsize;
    int refcnt;
    npy_intp* new_dimensions=newshape->ptr;
    npy_intp new_strides[NPY_MAXDIMS];
    size_t sd;
    npy_intp *dimptr;
    char *new_data;
    npy_intp largest;

    if (!PyMicArray_ISONESEGMENT(self)) {
        PyErr_SetString(PyExc_ValueError,
                        "resize only works on single-segment arrays");
        return NULL;
    }

    if (PyMicArray_DESCR(self)->elsize == 0) {
        PyErr_SetString(PyExc_ValueError,
                        "Bad data-type size.");
        return NULL;
    }
    /* Compute the new element count, rejecting negative dims and
     * overflow (largest bounds newsize * elsize to NPY_MAX_INTP). */
    newsize = 1;
    largest = NPY_MAX_INTP / PyMicArray_DESCR(self)->elsize;
    for(k = 0; k < new_nd; k++) {
        if (new_dimensions[k] == 0) {
            break;
        }
        if (new_dimensions[k] < 0) {
            PyErr_SetString(PyExc_ValueError,
                            "negative dimensions not allowed");
            return NULL;
        }
        newsize *= new_dimensions[k];
        if (newsize <= 0 || newsize > largest) {
            return PyErr_NoMemory();
        }
    }
    oldsize = PyMicArray_SIZE(self);

    if (oldsize != newsize) {
        if (!(PyMicArray_FLAGS(self) & NPY_ARRAY_OWNDATA)) {
            PyErr_SetString(PyExc_ValueError,
                            "cannot resize this array: it does not own its data");
            return NULL;
        }

        if (refcheck) {
#ifdef PYPY_VERSION
            /* PyPy has no reliable refcounts, so refcheck cannot work. */
            PyErr_SetString(PyExc_ValueError,
                    "cannot resize an array with refcheck=True on PyPy.\n"
                    "Use the resize function or refcheck=False");
            return NULL;
#else
            refcnt = PyArray_REFCOUNT(self);
#endif /* PYPY_VERSION */
        }
        else {
            refcnt = 1;
        }
        if ((refcnt > 2)
                || (PyMicArray_BASE(self) != NULL)
                || (((PyMicArrayObject *)self)->weakreflist != NULL)) {
            PyErr_SetString(PyExc_ValueError,
                    "cannot resize an array that "
                    "references or is referenced\n"
                    "by another array in this way.  Use the resize function");
            return NULL;
        }

        if (newsize == 0) {
            /* keep a one-element allocation so data is never NULL */
            sd = PyMicArray_DESCR(self)->elsize;
        }
        else {
            sd = newsize*PyMicArray_DESCR(self)->elsize;
        }
        /* Reallocate space if needed */
        new_data = PyDataMemMic_RENEW(PyMicArray_DATA(self), sd,
                                      PyMicArray_DEVICE(self));
        if (new_data == NULL) {
            PyErr_SetString(PyExc_MemoryError,
                            "cannot allocate memory for array");
            return NULL;
        }
        ((PyMicArrayObject *)self)->data = new_data;
    }

    if ((newsize > oldsize) && PyMicArray_ISWRITEABLE(self)) {
        /* Fill new memory with zeros */
        elsize = PyMicArray_DESCR(self)->elsize;
        if (PyDataType_FLAGCHK(PyMicArray_DESCR(self), NPY_ITEM_REFCOUNT)) {
            /* dtype holds object references: zero element-by-element */
            PyObject *zero = PyInt_FromLong(0);
            char *optr;
            optr = PyMicArray_BYTES(self) + oldsize*elsize;
            n = newsize - oldsize;
            for (k = 0; k < n; k++) {
                _putzero(PyMicArray_DEVICE(self), (char *)optr, zero,
                         PyMicArray_DESCR(self));
                optr += elsize;
            }
            Py_DECREF(zero);
        }
        else {
            /* plain data: zero-fill the tail on the device via an
             * offloaded memset (addr/fill_size mapped to the target) */
            void *addr = (void *) PyMicArray_BYTES(self) + oldsize * elsize;
            npy_intp fill_size = (newsize - oldsize) * elsize;
            #pragma omp target device(self->device) map(to:addr,fill_size)
            memset(addr, 0, fill_size);
        }
    }

    if (PyMicArray_NDIM(self) != new_nd) {
        /* Different number of dimensions. */
        ((PyMicArrayObject *)self)->nd = new_nd;
        /* Need new dimensions and strides arrays; 3*new_nd presumably
         * reserves a third slot (backstrides-style layout) — TODO confirm */
        dimptr = PyDimMem_RENEW(PyMicArray_DIMS(self), 3*new_nd);
        if (dimptr == NULL) {
            PyErr_SetString(PyExc_MemoryError,
                            "cannot allocate memory for array");
            return NULL;
        }
        ((PyMicArrayObject *)self)->dimensions = dimptr;
        ((PyMicArrayObject *)self)->strides = dimptr + new_nd;
    }

    /* make new_strides variable */
    _array_fill_strides(
        new_strides, new_dimensions, new_nd, PyMicArray_DESCR(self)->elsize,
        PyMicArray_FLAGS(self), &(((PyMicArrayObject *)self)->flags));
    memmove(PyMicArray_DIMS(self), new_dimensions, new_nd*sizeof(npy_intp));
    memmove(PyMicArray_STRIDES(self), new_strides, new_nd*sizeof(npy_intp));
    Py_RETURN_NONE;
}

/*
 * Returns a new array
 * with the new shape from the data
 * in the old array --- order-perspective depends on order argument.
 * copy-only-if-necessary
 */

/*NUMPY_API
 * New shape for an array
 */
NPY_NO_EXPORT PyObject *
PyMicArray_Newshape(PyMicArrayObject *self, PyArray_Dims *newdims,
                    NPY_ORDER order)
{
    npy_intp i;
    npy_intp *dimensions = newdims->ptr;
    PyMicArrayObject *ret;
    int ndim = newdims->len;
    npy_bool same, incref = NPY_TRUE;
    npy_intp *strides = NULL;
    npy_intp newstrides[NPY_MAXDIMS];
    int flags;

    if (order == NPY_ANYORDER) {
        order = PyMicArray_ISFORTRAN(self);
    }
    else if (order == NPY_KEEPORDER) {
        PyErr_SetString(PyExc_ValueError,
                "order 'K' is not permitted for reshaping");
        return NULL;
    }
    /* Quick check to make sure anything actually needs to be done */
    if (ndim == PyMicArray_NDIM(self)) {
        same = NPY_TRUE;
        i = 0;
        while (same && i < ndim) {
            if (PyMicArray_DIM(self,i) != dimensions[i]) {
                same = NPY_FALSE;
            }
            i++;
        }
        if (same) {
            return PyMicArray_View(self, NULL, NULL);
        }
    }

    /*
     * fix any -1 dimensions and check new-dimensions against old size
     */
    if (_fix_unknown_dimension(newdims, self) < 0) {
        return NULL;
    }
    /*
     * sometimes we have to create a new copy of the array
     * in order to get the right orientation and
     * because we can't just re-use the buffer with the
     * data in the order it is in.
     * NPY_RELAXED_STRIDES_CHECKING: size check is unnecessary when set.
     */
    if ((PyMicArray_SIZE(self) > 1) &&
        ((order == NPY_CORDER && !PyMicArray_IS_C_CONTIGUOUS(self)) ||
         (order == NPY_FORTRANORDER && !PyMicArray_IS_F_CONTIGUOUS(self)))) {
        int success = 0;
        success = _attempt_nocopy_reshape(self, ndim, dimensions,
                                          newstrides, order);
        if (success) {
            /* no need to copy the array after all */
            strides = newstrides;
        }
        else {
            PyObject *newcopy;
            newcopy = PyMicArray_NewCopy(self, order);
            if (newcopy == NULL) {
                return NULL;
            }
            /* `self` now refers to the copy we own; skip the extra
             * INCREF when handing it to the new view below */
            incref = NPY_FALSE;
            self = (PyMicArrayObject *)newcopy;
        }
    }
    /* We always have to interpret the contiguous buffer correctly */

    /* Make sure the flags argument is set. */
    flags = PyMicArray_FLAGS(self);
    if (ndim > 1) {
        if (order == NPY_FORTRANORDER) {
            flags &= ~NPY_ARRAY_C_CONTIGUOUS;
            flags |= NPY_ARRAY_F_CONTIGUOUS;
        }
        else {
            flags &= ~NPY_ARRAY_F_CONTIGUOUS;
            flags |= NPY_ARRAY_C_CONTIGUOUS;
        }
    }

    Py_INCREF(PyMicArray_DESCR(self));
    ret = (PyMicArrayObject *)PyMicArray_NewFromDescr_int(
            PyMicArray_DEVICE(self),
            Py_TYPE(self), PyMicArray_DESCR(self),
            ndim, dimensions, strides, PyMicArray_DATA(self),
            flags, (PyObject *)self, 0, 1);

    if (ret == NULL) {
        goto fail;
    }

    if (incref) {
        Py_INCREF(self);
    }
    /* SetBaseObject steals the reference to `self` */
    if (PyMicArray_SetBaseObject(ret, (PyObject *)self)) {
        Py_DECREF(ret);
        return NULL;
    }

    PyMicArray_UpdateFlags(ret, NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS);
    return (PyObject *)ret;

fail:
    if (!incref) {
        /* drop the copy we created above */
        Py_DECREF(self);
    }
    return NULL;
}

/* For back-ward compatability -- Not recommended */

/*NUMPY_API
 * Reshape
 */
NPY_NO_EXPORT PyObject *
PyMicArray_Reshape(PyMicArrayObject *self, PyObject *shape)
{
    PyObject *ret;
    PyArray_Dims newdims;

    if (!PyArray_IntpConverter(shape, &newdims)) {
        return NULL;
    }
    ret = PyMicArray_Newshape(self, &newdims, NPY_CORDER);
    PyDimMem_FREE(newdims.ptr);
    return ret;
}

/*
 * Zero one element at optr on the given device, recursing into
 * structured dtypes field-by-field. Plain (non-refcounted) data is
 * cleared with a device memset.
 */
static void
_putzero(int device, char *optr, PyObject *zero, PyArray_Descr *dtype)
{
    if (!PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT)) {
        npy_intp num = dtype->elsize;
        target_memset(optr, 0, num, device);
    }
    else if (PyDataType_HASFIELDS(dtype)) {
        PyObject *key, *value, *title = NULL;
        PyArray_Descr *new;
        int offset;
        Py_ssize_t pos = 0;
        while (PyDict_Next(dtype->fields, &pos, &key, &value)) {
            if NPY_TITLE_KEY(key, value) {
                continue;
            }
            if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) {
                return;
            }
            _putzero(device, optr + offset, zero, new);
        }
    }
    return;
}

/*
 * attempt to reshape an array without copying data
 *
 * The requested newdims are not checked, but must be compatible with
 * the size of self, which must be non-zero. Other than that this
 * function should correctly handle all reshapes, including axes of
 * length 1. Zero strides should work but are untested.
 *
 * If a copy is needed, returns 0
 * If no copy is needed, returns 1 and fills newstrides
 *     with appropriate strides
 *
 * The "is_f_order" argument describes how the array should be viewed
 * during the reshape, not how it is stored in memory (that
 * information is in PyArray_STRIDES(self)).
 *
 * If some output dimensions have length 1, the strides assigned to
 * them are arbitrary. In the current implementation, they are the
 * stride of the next-fastest index.
 */
static int
_attempt_nocopy_reshape(PyMicArrayObject *self, int newnd, npy_intp* newdims,
                        npy_intp *newstrides, int is_f_order)
{
    int oldnd;
    npy_intp olddims[NPY_MAXDIMS];
    npy_intp oldstrides[NPY_MAXDIMS];
    npy_intp last_stride;
    int oi, oj, ok, ni, nj, nk;

    oldnd = 0;
    /*
     * Remove axes with dimension 1 from the old array. They have no effect
     * but would need special cases since their strides do not matter.
     */
    for (oi = 0; oi < PyMicArray_NDIM(self); oi++) {
        if (PyMicArray_DIMS(self)[oi]!= 1) {
            olddims[oldnd] = PyMicArray_DIMS(self)[oi];
            oldstrides[oldnd] = PyMicArray_STRIDES(self)[oi];
            oldnd++;
        }
    }

    /* oi to oj and ni to nj give the axis ranges currently worked with */
    oi = 0;
    oj = 1;
    ni = 0;
    nj = 1;
    while (ni < newnd && oi < oldnd) {
        npy_intp np = newdims[ni];
        npy_intp op = olddims[oi];

        /* grow the smaller side until both ranges cover the same
         * number of elements */
        while (np != op) {
            if (np < op) {
                /* Misses trailing 1s, these are handled later */
                np *= newdims[nj++];
            } else {
                op *= olddims[oj++];
            }
        }

        /* Check whether the original axes can be combined */
        for (ok = oi; ok < oj - 1; ok++) {
            if (is_f_order) {
                if (oldstrides[ok+1] != olddims[ok]*oldstrides[ok]) {
                    /* not contiguous enough */
                    return 0;
                }
            }
            else {
                /* C order */
                if (oldstrides[ok] != olddims[ok+1]*oldstrides[ok+1]) {
                    /* not contiguous enough */
                    return 0;
                }
            }
        }

        /* Calculate new strides for all axes currently worked with */
        if (is_f_order) {
            newstrides[ni] = oldstrides[oi];
            for (nk = ni + 1; nk < nj; nk++) {
                newstrides[nk] = newstrides[nk - 1]*newdims[nk - 1];
            }
        }
        else {
            /* C order */
            newstrides[nj - 1] = oldstrides[oj - 1];
            for (nk = nj - 1; nk > ni; nk--) {
                newstrides[nk - 1] = newstrides[nk]*newdims[nk];
            }
        }
        ni = nj++;
        oi = oj++;
    }

    /*
     * Set strides corresponding to trailing 1s of the new shape.
     */
    if (ni >= 1) {
        last_stride = newstrides[ni - 1];
    }
    else {
        last_stride = PyMicArray_ITEMSIZE(self);
    }
    if (is_f_order) {
        last_stride *= newdims[ni - 1];
    }
    for (nk = ni; nk < newnd; nk++) {
        newstrides[nk] = last_stride;
    }
    return 1;
}

/* Raise ValueError: "cannot reshape array of size N into shape (...)". */
static void
raise_reshape_size_mismatch(PyArray_Dims *newshape, PyMicArrayObject *arr)
{
    PyObject *msg = PyUString_FromFormat("cannot reshape array of size %zd "
                                         "into shape ", PyMicArray_SIZE(arr));
    PyObject *tmp = convert_shape_to_string(newshape->len, newshape->ptr, "");

    PyUString_ConcatAndDel(&msg, tmp);
    if (msg != NULL) {
        PyErr_SetObject(PyExc_ValueError, msg);
        Py_DECREF(msg);
    }
}

/*
 * Resolve a single -1 ("unknown") entry in newshape in place, and
 * verify that the resulting shape matches arr's total size.
 * Returns 0 on success, -1 (with an exception set) on failure.
 */
static int
_fix_unknown_dimension(PyArray_Dims *newshape, PyMicArrayObject *arr)
{
    npy_intp *dimensions;
    npy_intp s_original = PyMicArray_SIZE(arr);
    npy_intp i_unknown, s_known;
    int i, n;

    dimensions = newshape->ptr;
    n = newshape->len;
    s_known = 1;
    i_unknown = -1;

    for (i = 0; i < n; i++) {
        if (dimensions[i] < 0) {
            if (i_unknown == -1) {
                i_unknown = i;
            }
            else {
                PyErr_SetString(PyExc_ValueError,
                                "can only specify one unknown dimension");
                return -1;
            }
        }
        else if (npy_mul_with_overflow_intp(&s_known, s_known,
                                            dimensions[i])) {
            raise_reshape_size_mismatch(newshape, arr);
            return -1;
        }
    }

    if (i_unknown >= 0) {
        if (s_known == 0 || s_original % s_known != 0) {
            raise_reshape_size_mismatch(newshape, arr);
            return -1;
        }
        dimensions[i_unknown] = s_original / s_known;
    }
    else {
        if (s_original != s_known) {
            raise_reshape_size_mismatch(newshape, arr);
            return -1;
        }
    }
    return 0;
}

/*NUMPY_API
 *
 * return a new view of the array object with all of its unit-length
 * dimensions squeezed out if needed, otherwise
 * return the same array.
 */
NPY_NO_EXPORT PyObject *
PyMicArray_Squeeze(PyMicArrayObject *self)
{
    PyMicArrayObject *ret;
    npy_bool unit_dims[NPY_MAXDIMS];
    int idim, ndim, any_ones;
    npy_intp *shape;

    ndim = PyMicArray_NDIM(self);
    shape = PyMicArray_SHAPE(self);

    /* flag every size-1 axis for removal */
    any_ones = 0;
    for (idim = 0; idim < ndim; ++idim) {
        if (shape[idim] == 1) {
            unit_dims[idim] = 1;
            any_ones = 1;
        }
        else {
            unit_dims[idim] = 0;
        }
    }

    /* If there were no ones to squeeze out, return the same array */
    if (!any_ones) {
        Py_INCREF(self);
        return (PyObject *)self;
    }

    ret = (PyMicArrayObject *)PyMicArray_View(self, NULL, &PyMicArray_Type);
    if (ret == NULL) {
        return NULL;
    }

    PyMicArray_RemoveAxesInPlace(ret, unit_dims);

    /*
     * If self isn't not a base class ndarray, call its
     * __array_wrap__ method
     */
    if (Py_TYPE(self) != &PyMicArray_Type) {
        PyMicArrayObject *tmp = PyMicArray_SubclassWrap(
                                    (PyMicArrayObject *) self,
                                    (PyMicArrayObject *) ret);
        Py_DECREF(ret);
        ret = tmp;
    }

    return (PyObject *)ret;
}

/*
 * Just like PyArray_Squeeze, but allows the caller to select
 * a subset of the size-one dimensions to squeeze out.
 */
NPY_NO_EXPORT PyObject *
PyMicArray_SqueezeSelected(PyMicArrayObject *self, npy_bool *axis_flags)
{
    PyMicArrayObject *ret;
    int idim, ndim, any_ones;
    npy_intp *shape;

    ndim = PyMicArray_NDIM(self);
    shape = PyMicArray_SHAPE(self);

    /* Verify that the axes requested are all of size one */
    any_ones = 0;
    for (idim = 0; idim < ndim; ++idim) {
        if (axis_flags[idim] != 0) {
            if (shape[idim] == 1) {
                any_ones = 1;
            }
            else {
                PyErr_SetString(PyExc_ValueError,
                        "cannot select an axis to squeeze out "
                        "which has size not equal to one");
                return NULL;
            }
        }
    }

    /* If there were no axes to squeeze out, return the same array */
    if (!any_ones) {
        Py_INCREF(self);
        return (PyObject *)self;
    }

    ret = (PyMicArrayObject *)PyMicArray_View(self, NULL, &PyMicArray_Type);
    if (ret == NULL) {
        return NULL;
    }

    PyMicArray_RemoveAxesInPlace(ret, axis_flags);

    /*
     * If self isn't not a base class ndarray, call its
     * __array_wrap__ method
     */
    if (Py_TYPE(self) != &PyMicArray_Type) {
        PyMicArrayObject *tmp = PyMicArray_SubclassWrap(self, ret);
        Py_DECREF(ret);
        ret = tmp;
    }

    return (PyObject *)ret;
}

/*NUMPY_API
 * SwapAxes
 */
NPY_NO_EXPORT PyObject *
PyMicArray_SwapAxes(PyMicArrayObject *ap, int a1, int a2)
{
    PyArray_Dims new_axes;
    npy_intp dims[NPY_MAXDIMS];
    int n = PyMicArray_NDIM(ap);
    int i;

    /* normalize negative axes */
    if (a1 < 0) {
        a1 += n;
    }
    if (a2 < 0) {
        a2 += n;
    }
    if ((a1 < 0) || (a1 >= n)) {
        PyErr_SetString(PyExc_ValueError,
                        "bad axis1 argument to swapaxes");
        return NULL;
    }
    if ((a2 < 0) || (a2 >= n)) {
        PyErr_SetString(PyExc_ValueError,
                        "bad axis2 argument to swapaxes");
        return NULL;
    }

    /* identity permutation with a1 and a2 exchanged */
    for (i = 0; i < n; ++i) {
        dims[i] = i;
    }
    dims[a1] = a2;
    dims[a2] = a1;

    new_axes.ptr = dims;
    new_axes.len = n;

    return PyMicArray_Transpose(ap, &new_axes);
}

/*NUMPY_API
 * Return Transpose.
 */
NPY_NO_EXPORT PyObject *
PyMicArray_Transpose(PyMicArrayObject *ap, PyArray_Dims *permute)
{
    npy_intp *axes;
    npy_intp i, n;
    npy_intp permutation[NPY_MAXDIMS], reverse_permutation[NPY_MAXDIMS];
    PyMicArrayObject *ret = NULL;
    int flags;

    if (permute == NULL) {
        /* NULL permutation means full reversal of the axes */
        n = PyMicArray_NDIM(ap);
        for (i = 0; i < n; i++) {
            permutation[i] = n-1-i;
        }
    }
    else {
        n = permute->len;
        axes = permute->ptr;
        if (n != PyMicArray_NDIM(ap)) {
            PyErr_SetString(PyExc_ValueError,
                            "axes don't match array");
            return NULL;
        }
        /* reverse_permutation doubles as a "seen" marker to reject
         * repeated axes */
        for (i = 0; i < n; i++) {
            reverse_permutation[i] = -1;
        }
        for (i = 0; i < n; i++) {
            int axis = axes[i];
            if (check_and_adjust_axis(&axis, PyMicArray_NDIM(ap)) < 0) {
                return NULL;
            }
            if (reverse_permutation[axis] != -1) {
                PyErr_SetString(PyExc_ValueError,
                                "repeated axis in transpose");
                return NULL;
            }
            reverse_permutation[axis] = i;
            permutation[i] = axis;
        }
    }

    flags = PyMicArray_FLAGS(ap);

    /*
     * this allocates memory for dimensions and strides (but fills them
     * incorrectly), sets up descr, and points data at PyArray_DATA(ap).
     */
    Py_INCREF(PyMicArray_DESCR(ap));
    ret = (PyMicArrayObject *) PyMicArray_NewFromDescr(PyMicArray_DEVICE(ap),
                                                       Py_TYPE(ap),
                                                       PyMicArray_DESCR(ap),
                                                       n, PyMicArray_DIMS(ap),
                                                       NULL, PyMicArray_DATA(ap),
                                                       flags,
                                                       (PyObject *)ap);
    if (ret == NULL) {
        return NULL;
    }

    /* point at true owner of memory: */
    Py_INCREF(ap);
    if (PyMicArray_SetBaseObject(ret, (PyObject *)ap) < 0) {
        Py_DECREF(ret);
        return NULL;
    }

    /* fix the dimensions and strides of the return-array */
    for (i = 0; i < n; i++) {
        PyMicArray_DIMS(ret)[i] = PyMicArray_DIMS(ap)[permutation[i]];
        PyMicArray_STRIDES(ret)[i] = PyMicArray_STRIDES(ap)[permutation[i]];
    }
    PyMicArray_UpdateFlags(ret, NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS |
                                NPY_ARRAY_ALIGNED);
    return (PyObject *)ret;
}

/*
 * Sorts items so stride is descending, because C-order
 * is the default in the face of ambiguity.
 */
static int _npy_stride_sort_item_comparator(const void *a, const void *b)
{
    npy_intp astride = ((const npy_stride_sort_item *)a)->stride,
             bstride = ((const npy_stride_sort_item *)b)->stride;

    /* Sort the absolute value of the strides */
    if (astride < 0) {
        astride = -astride;
    }
    if (bstride < 0) {
        bstride = -bstride;
    }

    if (astride == bstride) {
        /*
         * Make the qsort stable by next comparing the perm order.
         * (Note that two perm entries will never be equal)
         */
        npy_intp aperm = ((const npy_stride_sort_item *)a)->perm,
                 bperm = ((const npy_stride_sort_item *)b)->perm;
        return (aperm < bperm) ? -1 : 1;
    }
    if (astride > bstride) {
        return -1;
    }
    return 1;
}

/*NUMPY_API
 * Ravel
 * Returns a contiguous array
 */
NPY_NO_EXPORT PyObject *
PyMicArray_Ravel(PyMicArrayObject *arr, NPY_ORDER order)
{
    PyArray_Dims newdim = {NULL,1};
    npy_intp val[1] = {-1};

    newdim.ptr = val;

    if (order == NPY_KEEPORDER) {
        /* This handles some corner cases, such as 0-d arrays as well */
        if (PyMicArray_IS_C_CONTIGUOUS(arr)) {
            order = NPY_CORDER;
        }
        else if (PyMicArray_IS_F_CONTIGUOUS(arr)) {
            order = NPY_FORTRANORDER;
        }
    }
    else if (order == NPY_ANYORDER) {
        order = PyMicArray_ISFORTRAN(arr) ? NPY_FORTRANORDER : NPY_CORDER;
    }

    if (order == NPY_CORDER && PyMicArray_IS_C_CONTIGUOUS(arr)) {
        return PyMicArray_Newshape(arr, &newdim, NPY_CORDER);
    }
    else if (order == NPY_FORTRANORDER && PyMicArray_IS_F_CONTIGUOUS(arr)) {
        return PyMicArray_Newshape(arr, &newdim, NPY_FORTRANORDER);
    }
    /* For KEEPORDER, check if we can make a flattened view */
    else if (order == NPY_KEEPORDER) {
        npy_stride_sort_item strideperm[NPY_MAXDIMS];
        npy_intp stride;
        int i, ndim = PyMicArray_NDIM(arr);

        PyArray_CreateSortedStridePerm(PyMicArray_NDIM(arr),
                                       PyMicArray_STRIDES(arr), strideperm);

        /* The output array must be contiguous, so the first stride is fixed */
        stride = PyMicArray_ITEMSIZE(arr);

        for (i = ndim-1; i >= 0; --i) {
            if (PyMicArray_DIM(arr, strideperm[i].perm) == 1) {
                /* A size one dimension does not matter */
                continue;
            }
            if (strideperm[i].stride != stride) {
                break;
            }
            stride *= PyMicArray_DIM(arr, strideperm[i].perm);
        }

        /* If all the strides matched a contiguous layout, return a view */
        if (i < 0) {
            PyMicArrayObject *ret;

            stride = PyMicArray_ITEMSIZE(arr);
            val[0] = PyMicArray_SIZE(arr);

            Py_INCREF(PyMicArray_DESCR(arr));
            ret = (PyMicArrayObject *)PyMicArray_NewFromDescr(
                                            PyMicArray_DEVICE(arr),
                                            Py_TYPE(arr),
                                            PyMicArray_DESCR(arr),
                                            1, val,
                                            &stride,
                                            PyMicArray_BYTES(arr),
                                            PyMicArray_FLAGS(arr),
                                            (PyObject *)arr);
            if (ret == NULL) {
                return NULL;
            }

            PyMicArray_UpdateFlags(ret,
                             NPY_ARRAY_C_CONTIGUOUS|NPY_ARRAY_F_CONTIGUOUS);
            Py_INCREF(arr);
            if (PyMicArray_SetBaseObject(ret, (PyObject *)arr) < 0) {
                Py_DECREF(ret);
                return NULL;
            }
            return (PyObject *)ret;
        }
    }

    /* no view possible: copy into a fresh contiguous array */
    return PyMicArray_Flatten(arr, order);
}

/*NUMPY_API
 * Flatten
 */
NPY_NO_EXPORT PyObject *
PyMicArray_Flatten(PyMicArrayObject *a, NPY_ORDER order)
{
    PyMicArrayObject *ret;
    npy_intp size;

    if (order == NPY_ANYORDER) {
        order = PyMicArray_ISFORTRAN(a) ? NPY_FORTRANORDER : NPY_CORDER;
    }

    size = PyMicArray_SIZE(a);
    Py_INCREF(PyMicArray_DESCR(a));
    ret = (PyMicArrayObject *)PyMicArray_NewFromDescr(PyMicArray_DEVICE(a),
                                   Py_TYPE(a),
                                   PyMicArray_DESCR(a),
                                   1, &size,
                                   NULL,
                                   NULL,
                                   0, (PyObject *)a);
    if (ret == NULL) {
        return NULL;
    }

    if (PyMicArray_CopyAsFlat(ret, a, order) < 0) {
        Py_DECREF(ret);
        return NULL;
    }
    return (PyObject *)ret;
}

/* See shape.h for parameters documentation */
NPY_NO_EXPORT PyObject *
build_shape_string(npy_intp n, npy_intp *vals)
{
    npy_intp i;
    PyObject *ret, *tmp;

    /*
     * Negative dimension indicates "newaxis", which can
     * be discarded for printing if it's a leading dimension.
     * Find the first non-"newaxis" dimension.
     */
    i = 0;
    while (i < n && vals[i] < 0) {
        ++i;
    }

    if (i == n) {
        return PyUString_FromFormat("()");
    }
    else {
        ret = PyUString_FromFormat("(%" NPY_INTP_FMT, vals[i++]);
        if (ret == NULL) {
            return NULL;
        }
    }

    for (; i < n; ++i) {
        if (vals[i] < 0) {
            tmp = PyUString_FromString(",newaxis");
        }
        else {
            tmp = PyUString_FromFormat(",%" NPY_INTP_FMT, vals[i]);
        }
        if (tmp == NULL) {
            Py_DECREF(ret);
            return NULL;
        }

        PyUString_ConcatAndDel(&ret, tmp);
        if (ret == NULL) {
            return NULL;
        }
    }

    tmp = PyUString_FromFormat(")");
    PyUString_ConcatAndDel(&ret, tmp);
    return ret;
}

/* Thin wrapper: delegates axis removal to the NumPy core routine. */
NPY_NO_EXPORT void
PyMicArray_RemoveAxesInPlace(PyMicArrayObject *arr, npy_bool *flags)
{
    PyArray_RemoveAxesInPlace((PyArrayObject *) arr, flags);
}
ASTMatchers.h
//===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements matchers to be used together with the MatchFinder to // match AST nodes. // // Matchers are created by generator functions, which can be combined in // a functional in-language DSL to express queries over the C++ AST. // // For example, to match a class with a certain name, one would call: // cxxRecordDecl(hasName("MyClass")) // which returns a matcher that can be used to find all AST nodes that declare // a class named 'MyClass'. // // For more complicated match expressions we're often interested in accessing // multiple parts of the matched AST nodes once a match is found. In that case, // use the id(...) matcher around the match expressions that match the nodes // you want to access. // // For example, when we're interested in child classes of a certain class, we // would write: // cxxRecordDecl(hasName("MyClass"), has(id("child", recordDecl()))) // When the match is found via the MatchFinder, a user provided callback will // be called with a BoundNodes instance that contains a mapping from the // strings that we provided for the id(...) calls to the nodes that were // matched. // In the given example, each time our matcher finds a match we get a callback // where "child" is bound to the RecordDecl node of the matching child // class declaration. // // See ASTMatchersInternal.h for a more in-depth explanation of the // implementation details of the matcher framework. // // See ASTMatchFinder.h for how to use the generated matchers to run over // an AST. 
// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #include "clang/AST/ASTContext.h" #include "clang/AST/ASTTypeTraits.h" #include "clang/AST/Attr.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclFriend.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/OperationKinds.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLoc.h" #include "clang/ASTMatchers/ASTMatchersInternal.h" #include "clang/ASTMatchers/ASTMatchersMacros.h" #include "clang/Basic/AttrKinds.h" #include "clang/Basic/ExceptionSpecificationType.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TypeTraits.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Regex.h" #include <cassert> #include <cstddef> #include <iterator> #include <limits> #include <string> #include <utility> #include <vector> namespace clang { namespace ast_matchers { /// Maps string IDs to AST nodes matched by parts of a matcher. /// /// The bound nodes are generated by calling \c bind("id") on the node matchers /// of the nodes we want to access later. 
/// /// The instances of BoundNodes are created by \c MatchFinder when the user's /// callbacks are executed every time a match is found. class BoundNodes { public: /// Returns the AST node bound to \c ID. /// /// Returns NULL if there was no node bound to \c ID or if there is a node but /// it cannot be converted to the specified type. template <typename T> const T *getNodeAs(StringRef ID) const { return MyBoundNodes.getNodeAs<T>(ID); } /// Type of mapping from binding identifiers to bound nodes. This type /// is an associative container with a key type of \c std::string and a value /// type of \c clang::ast_type_traits::DynTypedNode using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap; /// Retrieve mapping from binding identifiers to bound nodes. const IDToNodeMap &getMap() const { return MyBoundNodes.getMap(); } private: friend class internal::BoundNodesTreeBuilder; /// Create BoundNodes from a pre-filled map of bindings. BoundNodes(internal::BoundNodesMap &MyBoundNodes) : MyBoundNodes(MyBoundNodes) {} internal::BoundNodesMap MyBoundNodes; }; /// If the provided matcher matches a node, binds the node to \c ID. /// /// FIXME: Do we want to support this now that we have bind()? template <typename T> internal::Matcher<T> id(StringRef ID, const internal::BindableMatcher<T> &InnerMatcher) { return InnerMatcher.bind(ID); } /// Types of matchers for the top-level classes in the AST class /// hierarchy. /// @{ using DeclarationMatcher = internal::Matcher<Decl>; using StatementMatcher = internal::Matcher<Stmt>; using TypeMatcher = internal::Matcher<QualType>; using TypeLocMatcher = internal::Matcher<TypeLoc>; using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>; using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>; using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>; /// @} /// Matches any node. /// /// Useful when another matcher requires a child matcher, but there's no /// additional constraint. 
This will often be used with an explicit conversion /// to an \c internal::Matcher<> type such as \c TypeMatcher. /// /// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g., /// \code /// "int* p" and "void f()" in /// int* p; /// void f(); /// \endcode /// /// Usable as: Any Matcher inline internal::TrueMatcher anything() { return internal::TrueMatcher(); } /// Matches the top declaration context. /// /// Given /// \code /// int X; /// namespace NS { /// int Y; /// } // namespace NS /// \endcode /// decl(hasDeclContext(translationUnitDecl())) /// matches "int X", but not "int Y". extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl> translationUnitDecl; /// Matches typedef declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefDecl() /// matches "typedef int X", but not "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl> typedefDecl; /// Matches typedef name declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefNameDecl() /// matches "typedef int X" and "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl> typedefNameDecl; /// Matches type alias declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typeAliasDecl() /// matches "using Y = int", but not "typedef int X" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl> typeAliasDecl; /// Matches type alias template declarations. /// /// typeAliasTemplateDecl() matches /// \code /// template <typename T> /// using Y = X<T>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl> typeAliasTemplateDecl; /// Matches AST nodes that were expanded within the main-file. 
/// /// Example matches X but not Y /// (matcher = cxxRecordDecl(isExpansionInMainFile()) /// \code /// #include <Y.h> /// class X {}; /// \endcode /// Y.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInMainFile, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); return SourceManager.isInMainFile( SourceManager.getExpansionLoc(Node.getBeginLoc())); } /// Matches AST nodes that were expanded within system-header-files. /// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInSystemHeader()) /// \code /// #include <SystemHeader.h> /// class X {}; /// \endcode /// SystemHeader.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } return SourceManager.isInSystemHeader(ExpansionLoc); } /// Matches AST nodes that were expanded within files whose name is /// partially matching a given regex. 
/// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*")) /// \code /// #include "ASTMatcher.h" /// class X {}; /// \endcode /// ASTMatcher.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER_P(isExpansionInFileMatching, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc), std::string, RegExp) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } auto FileEntry = SourceManager.getFileEntryForID(SourceManager.getFileID(ExpansionLoc)); if (!FileEntry) { return false; } auto Filename = FileEntry->getName(); llvm::Regex RE(RegExp); return RE.match(Filename); } /// Matches declarations. /// /// Examples matches \c X, \c C, and the friend declaration inside \c C; /// \code /// void X(); /// class C { /// friend X; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<Decl> decl; /// Matches a declaration of a linkage specification. /// /// Given /// \code /// extern "C" {} /// \endcode /// linkageSpecDecl() /// matches "extern "C" {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl> linkageSpecDecl; /// Matches a declaration of anything that could have a name. /// /// Example matches \c X, \c S, the anonymous union type, \c i, and \c U; /// \code /// typedef int X; /// struct S { /// union { /// int i; /// } U; /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl; /// Matches a declaration of label. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelDecl() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl; /// Matches a declaration of a namespace. 
/// /// Given /// \code /// namespace {} /// namespace test {} /// \endcode /// namespaceDecl() /// matches "namespace {}" and "namespace test {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl> namespaceDecl; /// Matches a declaration of a namespace alias. /// /// Given /// \code /// namespace test {} /// namespace alias = ::test; /// \endcode /// namespaceAliasDecl() /// matches "namespace alias" but not "namespace test" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl> namespaceAliasDecl; /// Matches class, struct, and union declarations. /// /// Example matches \c X, \c Z, \c U, and \c S /// \code /// class X; /// template<class T> class Z {}; /// struct S {}; /// union U {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl; /// Matches C++ class declarations. /// /// Example matches \c X, \c Z /// \code /// class X; /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl> cxxRecordDecl; /// Matches C++ class template declarations. /// /// Example matches \c Z /// \code /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl> classTemplateDecl; /// Matches C++ class template specializations. /// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// \endcode /// classTemplateSpecializationDecl() /// matches the specializations \c A<int> and \c A<double> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplateSpecializationDecl> classTemplateSpecializationDecl; /// Matches C++ class template partial specializations. 
/// /// Given /// \code /// template<class T1, class T2, int I> /// class A {}; /// /// template<class T, int I> /// class A<T, T*, I> {}; /// /// template<> /// class A<int, int, 1> {}; /// \endcode /// classTemplatePartialSpecializationDecl() /// matches the specialization \c A<T,T*,I> but not \c A<int,int,1> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplatePartialSpecializationDecl> classTemplatePartialSpecializationDecl; /// Matches declarator declarations (field, variable, function /// and non-type template parameter declarations). /// /// Given /// \code /// class X { int y; }; /// \endcode /// declaratorDecl() /// matches \c int y. extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl> declaratorDecl; /// Matches parameter variable declarations. /// /// Given /// \code /// void f(int x); /// \endcode /// parmVarDecl() /// matches \c int x. extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl> parmVarDecl; /// Matches C++ access specifier declarations. /// /// Given /// \code /// class C { /// public: /// int a; /// }; /// \endcode /// accessSpecDecl() /// matches 'public:' extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl> accessSpecDecl; /// Matches constructor initializers. /// /// Examples matches \c i(42). /// \code /// class C { /// C() : i(42) {} /// int i; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<CXXCtorInitializer> cxxCtorInitializer; /// Matches template arguments. /// /// Given /// \code /// template <typename T> struct C {}; /// C<int> c; /// \endcode /// templateArgument() /// matches 'int' in C<int>. extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument; /// Matches template name. /// /// Given /// \code /// template <typename T> class X { }; /// X<int> xi; /// \endcode /// templateName() /// matches 'X' in X<int>. 
extern const internal::VariadicAllOfMatcher<TemplateName> templateName;

/// Matches non-type template parameter declarations.
///
/// Given
/// \code
///   template <typename T, int N> struct C {};
/// \endcode
/// nonTypeTemplateParmDecl()
///   matches 'N', but not 'T'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
                                                   NonTypeTemplateParmDecl>
    nonTypeTemplateParmDecl;

/// Matches template type parameter declarations.
///
/// Given
/// \code
///   template <typename T, int N> struct C {};
/// \endcode
/// templateTypeParmDecl()
///   matches 'T', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
    templateTypeParmDecl;

/// Matches public C++ declarations.
///
/// Given
/// \code
///   class C {
///   public:    int a;
///   protected: int b;
///   private:   int c;
///   };
/// \endcode
/// fieldDecl(isPublic())
///   matches 'int a;'
AST_MATCHER(Decl, isPublic) { return Node.getAccess() == AS_public; }

/// Matches protected C++ declarations.
///
/// Given
/// \code
///   class C {
///   public:    int a;
///   protected: int b;
///   private:   int c;
///   };
/// \endcode
/// fieldDecl(isProtected())
///   matches 'int b;'
AST_MATCHER(Decl, isProtected) { return Node.getAccess() == AS_protected; }

/// Matches private C++ declarations.
///
/// Given
/// \code
///   class C {
///   public:    int a;
///   protected: int b;
///   private:   int c;
///   };
/// \endcode
/// fieldDecl(isPrivate())
///   matches 'int c;'
AST_MATCHER(Decl, isPrivate) { return Node.getAccess() == AS_private; }

/// Matches non-static data members that are bit-fields.
///
/// Given
/// \code
///   class C {
///     int a : 2;
///     int b;
///   };
/// \endcode
/// fieldDecl(isBitField())
///   matches 'int a;' but not 'int b;'.
AST_MATCHER(FieldDecl, isBitField) {
  return Node.isBitField();
}

/// Matches non-static data members that are bit-fields of the specified
/// bit width.
/// /// Given /// \code /// class C { /// int a : 2; /// int b : 4; /// int c : 2; /// }; /// \endcode /// fieldDecl(hasBitWidth(2)) /// matches 'int a;' and 'int c;' but not 'int b;'. AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) { return Node.isBitField() && Node.getBitWidthValue(Finder->getASTContext()) == Width; } /// Matches non-static data members that have an in-class initializer. /// /// Given /// \code /// class C { /// int a = 2; /// int b = 3; /// int c; /// }; /// \endcode /// fieldDecl(hasInClassInitializer(integerLiteral(equals(2)))) /// matches 'int a;' but not 'int b;'. /// fieldDecl(hasInClassInitializer(anything())) /// matches 'int a;' and 'int b;' but not 'int c;'. AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getInClassInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// Determines whether the function is "main", which is the entry point /// into an executable program. AST_MATCHER(FunctionDecl, isMain) { return Node.isMain(); } /// Matches the specialized template of a specialization declaration. /// /// Given /// \code /// template<typename T> class A {}; #1 /// template<> class A<int> {}; #2 /// \endcode /// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl())) /// matches '#2' with classTemplateDecl() matching the class template /// declaration of 'A' at #1. AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate, internal::Matcher<ClassTemplateDecl>, InnerMatcher) { const ClassTemplateDecl* Decl = Node.getSpecializedTemplate(); return (Decl != nullptr && InnerMatcher.matches(*Decl, Finder, Builder)); } /// Matches a declaration that has been implicitly added /// by the compiler (eg. implicit default/copy constructors). 
AST_MATCHER(Decl, isImplicit) {
  return Node.isImplicit();
}

/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl that have at least one TemplateArgument matching the given
/// InnerMatcher.
///
/// Given
/// \code
///   template<typename T> class A {};
///   template<> class A<double> {};
///   A<int> a;
///
///   template<typename T> void f() {};
///   void func() { f<int>(); };
/// \endcode
///
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
///     refersToType(asString("int"))))
///   matches the specialization \c A<int>
///
/// functionDecl(hasAnyTemplateArgument(refersToType(asString("int"))))
///   matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P(
    hasAnyTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    internal::Matcher<TemplateArgument>, InnerMatcher) {
  ArrayRef<TemplateArgument> List =
      internal::getTemplateSpecializationArgs(Node);
  return matchesFirstInRange(InnerMatcher, List.begin(), List.end(), Finder,
                             Builder);
}

/// Matches expressions that match InnerMatcher after any implicit AST
/// nodes are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
///   class C {};
///   C a = C();
///   C b;
///   C c = b;
/// \endcode
/// The matchers
/// \code
///    varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr())))
/// \endcode
/// would match the declarations for a, b, and c.
/// While
/// \code
///    varDecl(hasInitializer(cxxConstructExpr()))
/// \endcode
/// only match the declarations for b and c.
AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>,
              InnerMatcher) {
  return InnerMatcher.matches(*Node.IgnoreImplicit(), Finder, Builder);
}

/// Matches expressions that match InnerMatcher after any implicit casts
/// are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given /// \code /// int arr[5]; /// int a = 0; /// char b = 0; /// const int c = a; /// int *d = arr; /// long e = (long) 0l; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringImpCasts(declRefExpr()))) /// \endcode /// would match the declarations for a, b, c, and d, but not e. /// While /// \code /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// \endcode /// only match the declarations for b, c, and d. AST_MATCHER_P(Expr, ignoringImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImpCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after parentheses and /// casts are stripped off. /// /// Implicit and non-C Style casts are also discarded. /// Given /// \code /// int a = 0; /// char b = (0); /// void* c = reinterpret_cast<char*>(0); /// char d = char(0); /// \endcode /// The matcher /// varDecl(hasInitializer(ignoringParenCasts(integerLiteral()))) /// would match the declarations for a, b, c, and d. /// while /// varDecl(hasInitializer(integerLiteral())) /// only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after implicit casts and /// parentheses are stripped off. /// /// Explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = (0); /// const int c = a; /// int *d = (arr); /// long e = ((long) 0l); /// \endcode /// The matchers /// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr()))) /// would match the declarations for a, b, c, and d, but not e. 
/// while /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// would only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenImpCasts(), Finder, Builder); } /// Matches types that match InnerMatcher after any parens are stripped. /// /// Given /// \code /// void (*fp)(void); /// \endcode /// The matcher /// \code /// varDecl(hasType(pointerType(pointee(ignoringParens(functionType()))))) /// \endcode /// would match the declaration for fp. AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>, InnerMatcher, 0) { return InnerMatcher.matches(Node.IgnoreParens(), Finder, Builder); } /// Overload \c ignoringParens for \c Expr. /// /// Given /// \code /// const char* str = ("my-string"); /// \endcode /// The matcher /// \code /// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral()))) /// \endcode /// would match the implicit cast resulting from the assignment. AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>, InnerMatcher, 1) { const Expr *E = Node.IgnoreParens(); return InnerMatcher.matches(*E, Finder, Builder); } /// Matches expressions that are instantiation-dependent even if it is /// neither type- nor value-dependent. /// /// In the following example, the expression sizeof(sizeof(T() + T())) /// is instantiation-dependent (since it involves a template parameter T), /// but is neither type- nor value-dependent, since the type of the inner /// sizeof is known (std::size_t) and therefore the size of the outer /// sizeof is known. 
/// \code /// template<typename T> /// void f(T x, T y) { sizeof(sizeof(T() + T()); } /// \endcode /// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()) AST_MATCHER(Expr, isInstantiationDependent) { return Node.isInstantiationDependent(); } /// Matches expressions that are type-dependent because the template type /// is not yet instantiated. /// /// For example, the expressions "x" and "x + y" are type-dependent in /// the following code, but "y" is not type-dependent: /// \code /// template<typename T> /// void add(T x, int y) { /// x + y; /// } /// \endcode /// expr(isTypeDependent()) matches x + y AST_MATCHER(Expr, isTypeDependent) { return Node.isTypeDependent(); } /// Matches expression that are value-dependent because they contain a /// non-type template parameter. /// /// For example, the array bound of "Chars" in the following example is /// value-dependent. /// \code /// template<int Size> int f() { return Size; } /// \endcode /// expr(isValueDependent()) matches return Size AST_MATCHER(Expr, isValueDependent) { return Node.isValueDependent(); } /// Matches classTemplateSpecializations, templateSpecializationType and /// functionDecl where the n'th TemplateArgument matches the given InnerMatcher. 
/// /// Given /// \code /// template<typename T, typename U> class A {}; /// A<bool, int> b; /// A<int, bool> c; /// /// template<typename T> void f() {} /// void func() { f<int>(); }; /// \endcode /// classTemplateSpecializationDecl(hasTemplateArgument( /// 1, refersToType(asString("int")))) /// matches the specialization \c A<bool, int> /// /// functionDecl(hasTemplateArgument(0, refersToType(asString("int")))) /// matches the specialization \c f<int> AST_POLYMORPHIC_MATCHER_P2( hasTemplateArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType, FunctionDecl), unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) { ArrayRef<TemplateArgument> List = internal::getTemplateSpecializationArgs(Node); if (List.size() <= N) return false; return InnerMatcher.matches(List[N], Finder, Builder); } /// Matches if the number of template arguments equals \p N. /// /// Given /// \code /// template<typename T> struct C {}; /// C<int> c; /// \endcode /// classTemplateSpecializationDecl(templateArgumentCountIs(1)) /// matches C<int>. AST_POLYMORPHIC_MATCHER_P( templateArgumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType), unsigned, N) { return internal::getTemplateSpecializationArgs(Node).size() == N; } /// Matches a TemplateArgument that refers to a certain type. /// /// Given /// \code /// struct X {}; /// template<typename T> struct A {}; /// A<X> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToType(class(hasName("X"))))) /// matches the specialization \c A<X> AST_MATCHER_P(TemplateArgument, refersToType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Type) return false; return InnerMatcher.matches(Node.getAsType(), Finder, Builder); } /// Matches a TemplateArgument that refers to a certain template. 
/// /// Given /// \code /// template<template <typename> class S> class X {}; /// template<typename T> class Y {}; /// X<Y> xi; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToTemplate(templateName()))) /// matches the specialization \c X<Y> AST_MATCHER_P(TemplateArgument, refersToTemplate, internal::Matcher<TemplateName>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Template) return false; return InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder); } /// Matches a canonical TemplateArgument that refers to a certain /// declaration. /// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToDeclaration(fieldDecl(hasName("next"))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, refersToDeclaration, internal::Matcher<Decl>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Declaration) return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder); return false; } /// Matches a sugar TemplateArgument that refers to a certain expression. /// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// templateSpecializationType(hasAnyTemplateArgument( /// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next")))))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Expression) return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder); return false; } /// Matches a TemplateArgument that is an integral value. 
/// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(isIntegral())) /// matches the implicit instantiation of C in C<42> /// with isIntegral() matching 42. AST_MATCHER(TemplateArgument, isIntegral) { return Node.getKind() == TemplateArgument::Integral; } /// Matches a TemplateArgument that referes to an integral type. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(refersToIntegralType(asString("int")))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, refersToIntegralType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Integral) return false; return InnerMatcher.matches(Node.getIntegralType(), Finder, Builder); } /// Matches a TemplateArgument of integral type with a given value. /// /// Note that 'Value' is a string as the template argument's value is /// an arbitrary precision integer. 'Value' must be euqal to the canonical /// representation of that integral value in base 10. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(equalsIntegralValue("42"))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, equalsIntegralValue, std::string, Value) { if (Node.getKind() != TemplateArgument::Integral) return false; return Node.getAsIntegral().toString(10) == Value; } /// Matches an Objective-C autorelease pool statement. /// /// Given /// \code /// @autoreleasepool { /// int x = 0; /// } /// \endcode /// autoreleasePoolStmt(stmt()) matches the declaration of "x" /// inside the autorelease pool. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAutoreleasePoolStmt> autoreleasePoolStmt; /// Matches any value declaration. 
/// /// Example matches A, B, C and F /// \code /// enum X { A, B, C }; /// void F(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl; /// Matches C++ constructor declarations. /// /// Example matches Foo::Foo() and Foo::Foo(int) /// \code /// class Foo { /// public: /// Foo(); /// Foo(int); /// int DoSomething(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl> cxxConstructorDecl; /// Matches explicit C++ destructor declarations. /// /// Example matches Foo::~Foo() /// \code /// class Foo { /// public: /// virtual ~Foo(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl> cxxDestructorDecl; /// Matches enum declarations. /// /// Example matches X /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl; /// Matches enum constants. /// /// Example matches A, B, C /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl> enumConstantDecl; /// Matches method declarations. /// /// Example matches y /// \code /// class X { void y(); }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl> cxxMethodDecl; /// Matches conversion operator declarations. /// /// Example matches the operator. /// \code /// class X { operator int() const; }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl> cxxConversionDecl; /// Matches user-defined and implicitly generated deduction guide. /// /// Example matches the deduction guide. /// \code /// template<typename T> /// class X { X(int) }; /// X(int) -> X<int>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl> cxxDeductionGuideDecl; /// Matches variable declarations. 
/// /// Note: this does not match declarations of member variables, which are /// "field" declarations in Clang parlance. /// /// Example matches a /// \code /// int a; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl; /// Matches field declarations. /// /// Given /// \code /// class X { int m; }; /// \endcode /// fieldDecl() /// matches 'm'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl; /// Matches indirect field declarations. /// /// Given /// \code /// struct X { struct { int a; }; }; /// \endcode /// indirectFieldDecl() /// matches 'a'. extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl> indirectFieldDecl; /// Matches function declarations. /// /// Example matches f /// \code /// void f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl> functionDecl; /// Matches C++ function template declarations. /// /// Example matches f /// \code /// template<class T> void f(T t) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl> functionTemplateDecl; /// Matches friend declarations. /// /// Given /// \code /// class X { friend void foo(); }; /// \endcode /// friendDecl() /// matches 'friend void foo()'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl; /// Matches statements. /// /// Given /// \code /// { ++a; } /// \endcode /// stmt() /// matches both the compound statement '{ ++a; }' and '++a'. extern const internal::VariadicAllOfMatcher<Stmt> stmt; /// Matches declaration statements. /// /// Given /// \code /// int a; /// \endcode /// declStmt() /// matches 'int a'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt; /// Matches member expressions. 
/// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// int a; static int b; /// }; /// \endcode /// memberExpr() /// matches this->x, x, y.x, a, this->b extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr; /// Matches unresolved member expressions. /// /// Given /// \code /// struct X { /// template <class T> void f(); /// void g(); /// }; /// template <class T> void h() { X x; x.f<T>(); x.g(); } /// \endcode /// unresolvedMemberExpr() /// matches x.f<T> extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr> unresolvedMemberExpr; /// Matches member expressions where the actual member referenced could not be /// resolved because the base expression or the member name was dependent. /// /// Given /// \code /// template <class T> void f() { T t; t.g(); } /// \endcode /// cxxDependentScopeMemberExpr() /// matches t.g extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDependentScopeMemberExpr> cxxDependentScopeMemberExpr; /// Matches call expressions. /// /// Example matches x.y() and y() /// \code /// X x; /// x.y(); /// y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr; /// Matches call expressions which were resolved using ADL. /// /// Example matches y(x) but not y(42) or NS::y(x). /// \code /// namespace NS { /// struct X {}; /// void y(X); /// } /// /// void y(...); /// /// void test() { /// NS::X x; /// y(x); // Matches /// NS::y(x); // Doesn't match /// y(42); // Doesn't match /// using NS::y; /// y(x); // Found by both unqualified lookup and ADL, doesn't match // } /// \endcode AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); } /// Matches lambda expressions. /// /// Example matches [&](){return 5;} /// \code /// [&](){return 5;} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr; /// Matches member call expressions. 
/// /// Example matches x.y() /// \code /// X x; /// x.y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr> cxxMemberCallExpr; /// Matches ObjectiveC Message invocation expressions. /// /// The innermost message send invokes the "alloc" class method on the /// NSString class, while the outermost message send invokes the /// "initWithString" instance method on the object returned from /// NSString's "alloc". This matcher should match both message sends. /// \code /// [[NSString alloc] initWithString:@"Hello"] /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr> objcMessageExpr; /// Matches Objective-C interface declarations. /// /// Example matches Foo /// \code /// @interface Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl> objcInterfaceDecl; /// Matches Objective-C implementation declarations. /// /// Example matches Foo /// \code /// @implementation Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl> objcImplementationDecl; /// Matches Objective-C protocol declarations. /// /// Example matches FooDelegate /// \code /// @protocol FooDelegate /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl> objcProtocolDecl; /// Matches Objective-C category declarations. /// /// Example matches Foo (Additions) /// \code /// @interface Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl> objcCategoryDecl; /// Matches Objective-C category definitions. /// /// Example matches Foo (Additions) /// \code /// @implementation Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl> objcCategoryImplDecl; /// Matches Objective-C method declarations. 
/// /// Example matches both declaration and definition of -[Foo method] /// \code /// @interface Foo /// - (void)method; /// @end /// /// @implementation Foo /// - (void)method {} /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl> objcMethodDecl; /// Matches block declarations. /// /// Example matches the declaration of the nameless block printing an input /// integer. /// /// \code /// myFunc(^(int p) { /// printf("%d", p); /// }) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl> blockDecl; /// Matches Objective-C instance variable declarations. /// /// Example matches _enabled /// \code /// @implementation Foo { /// BOOL _enabled; /// } /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl> objcIvarDecl; /// Matches Objective-C property declarations. /// /// Example matches enabled /// \code /// @interface Foo /// @property BOOL enabled; /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl> objcPropertyDecl; /// Matches Objective-C \@throw statements. /// /// Example matches \@throw /// \code /// @throw obj; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt> objcThrowStmt; /// Matches Objective-C @try statements. /// /// Example matches @try /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt> objcTryStmt; /// Matches Objective-C @catch statements. /// /// Example matches @catch /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt> objcCatchStmt; /// Matches Objective-C @finally statements. 
/// /// Example matches @finally /// \code /// @try {} /// @finally {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt> objcFinallyStmt; /// Matches expressions that introduce cleanups to be run at the end /// of the sub-expression's evaluation. /// /// Example matches std::string() /// \code /// const std::string str = std::string(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups> exprWithCleanups; /// Matches init list expressions. /// /// Given /// \code /// int a[] = { 1, 2 }; /// struct B { int x, y; }; /// B b = { 5, 6 }; /// \endcode /// initListExpr() /// matches "{ 1, 2 }" and "{ 5, 6 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr> initListExpr; /// Matches the syntactic form of init list expressions /// (if expression have it). AST_MATCHER_P(InitListExpr, hasSyntacticForm, internal::Matcher<Expr>, InnerMatcher) { const Expr *SyntForm = Node.getSyntacticForm(); return (SyntForm != nullptr && InnerMatcher.matches(*SyntForm, Finder, Builder)); } /// Matches C++ initializer list expressions. /// /// Given /// \code /// std::vector<int> a({ 1, 2, 3 }); /// std::vector<int> b = { 4, 5 }; /// int c[] = { 6, 7 }; /// std::pair<int, int> d = { 8, 9 }; /// \endcode /// cxxStdInitializerListExpr() /// matches "{ 1, 2, 3 }" and "{ 4, 5 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStdInitializerListExpr> cxxStdInitializerListExpr; /// Matches implicit initializers of init list expressions. /// /// Given /// \code /// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 }; /// \endcode /// implicitValueInitExpr() /// matches "[0].y" (implicitly) extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr> implicitValueInitExpr; /// Matches paren list expressions. /// ParenListExprs don't have a predefined type and are used for late parsing. /// In the final AST, they can be met in template declarations. 
/// /// Given /// \code /// template<typename T> class X { /// void f() { /// X x(*this); /// int a = 0, b = 1; int i = (a, b); /// } /// }; /// \endcode /// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b) /// has a predefined type and is a ParenExpr, not a ParenListExpr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr> parenListExpr; /// Matches substitutions of non-type template parameters. /// /// Given /// \code /// template <int N> /// struct A { static const int n = N; }; /// struct B : public A<42> {}; /// \endcode /// substNonTypeTemplateParmExpr() /// matches "N" in the right-hand side of "static const int n = N;" extern const internal::VariadicDynCastAllOfMatcher<Stmt, SubstNonTypeTemplateParmExpr> substNonTypeTemplateParmExpr; /// Matches using declarations. /// /// Given /// \code /// namespace X { int x; } /// using X::x; /// \endcode /// usingDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl; /// Matches using namespace declarations. /// /// Given /// \code /// namespace X { int x; } /// using namespace X; /// \endcode /// usingDirectiveDecl() /// matches \code using namespace X \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl> usingDirectiveDecl; /// Matches reference to a name that can be looked up during parsing /// but could not be resolved to a specific declaration. /// /// Given /// \code /// template<typename T> /// T foo() { T a; return a; } /// template<typename T> /// void bar() { /// foo<T>(); /// } /// \endcode /// unresolvedLookupExpr() /// matches \code foo<T>() \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr> unresolvedLookupExpr; /// Matches unresolved using value declarations. 
/// /// Given /// \code /// template<typename X> /// class C : private X { /// using X::x; /// }; /// \endcode /// unresolvedUsingValueDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingValueDecl> unresolvedUsingValueDecl; /// Matches unresolved using value declarations that involve the /// typename. /// /// Given /// \code /// template <typename T> /// struct Base { typedef T Foo; }; /// /// template<typename T> /// struct S : private Base<T> { /// using typename Base<T>::Foo; /// }; /// \endcode /// unresolvedUsingTypenameDecl() /// matches \code using Base<T>::Foo \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingTypenameDecl> unresolvedUsingTypenameDecl; /// Matches a constant expression wrapper. /// /// Example matches the constant in the case statement: /// (matcher = constantExpr()) /// \code /// switch (a) { /// case 37: break; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr> constantExpr; /// Matches parentheses used in expressions. /// /// Example matches (foo() + 1) /// \code /// int foo() { return 1; } /// int a = (foo() + 1); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr; /// Matches constructor call expressions (including implicit ones). /// /// Example matches string(ptr, n) and ptr within arguments of f /// (matcher = cxxConstructExpr()) /// \code /// void f(const string &a, const string &b); /// char *ptr; /// int n; /// f(string(ptr, n), ptr); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr> cxxConstructExpr; /// Matches unresolved constructor call expressions. 
/// /// Example matches T(t) in return statement of f /// (matcher = cxxUnresolvedConstructExpr()) /// \code /// template <typename T> /// void f(const T& t) { return T(t); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXUnresolvedConstructExpr> cxxUnresolvedConstructExpr; /// Matches implicit and explicit this expressions. /// /// Example matches the implicit this expression in "return i". /// (matcher = cxxThisExpr()) /// \code /// struct foo { /// int i; /// int f() { return i; } /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr> cxxThisExpr; /// Matches nodes where temporaries are created. /// /// Example matches FunctionTakesString(GetStringByValue()) /// (matcher = cxxBindTemporaryExpr()) /// \code /// FunctionTakesString(GetStringByValue()); /// FunctionTakesStringByPointer(GetStringPointer()); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr> cxxBindTemporaryExpr; /// Matches nodes where temporaries are materialized. /// /// Example: Given /// \code /// struct T {void func();}; /// T f(); /// void g(T); /// \endcode /// materializeTemporaryExpr() matches 'f()' in these statements /// \code /// T u(f()); /// g(f()); /// f().func(); /// \endcode /// but does not match /// \code /// f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, MaterializeTemporaryExpr> materializeTemporaryExpr; /// Matches new expressions. /// /// Given /// \code /// new X; /// \endcode /// cxxNewExpr() /// matches 'new X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr; /// Matches delete expressions. /// /// Given /// \code /// delete X; /// \endcode /// cxxDeleteExpr() /// matches 'delete X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr> cxxDeleteExpr; /// Matches array subscript expressions. 
/// /// Given /// \code /// int i = a[1]; /// \endcode /// arraySubscriptExpr() /// matches "a[1]" extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr> arraySubscriptExpr; /// Matches the value of a default argument at the call site. /// /// Example matches the CXXDefaultArgExpr placeholder inserted for the /// default value of the second parameter in the call expression f(42) /// (matcher = cxxDefaultArgExpr()) /// \code /// void f(int x, int y = 0); /// f(42); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr> cxxDefaultArgExpr; /// Matches overloaded operator calls. /// /// Note that if an operator isn't overloaded, it won't match. Instead, use /// binaryOperator matcher. /// Currently it does not match operators such as new delete. /// FIXME: figure out why these do not match? /// /// Example matches both operator<<((o << b), c) and operator<<(o, b) /// (matcher = cxxOperatorCallExpr()) /// \code /// ostream &operator<< (ostream &out, int i) { }; /// ostream &o; int b = 1, c = 1; /// o << b << c; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr> cxxOperatorCallExpr; /// Matches expressions. /// /// Example matches x() /// \code /// void f() { x(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr; /// Matches expressions that refer to declarations. /// /// Example matches x in if (x) /// \code /// bool x; /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr> declRefExpr; /// Matches a reference to an ObjCIvar. /// /// Example: matches "a" in "init" method: /// \code /// @implementation A { /// NSString *a; /// } /// - (void) init { /// a = @"hello"; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr> objcIvarRefExpr; /// Matches a reference to a block. 
/// /// Example: matches "^{}": /// \code /// void f() { ^{}(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr; /// Matches if statements. /// /// Example matches 'if (x) {}' /// \code /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt; /// Matches for statements. /// /// Example matches 'for (;;) {}' /// \code /// for (;;) {} /// int i[] = {1, 2, 3}; for (auto a : i); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt; /// Matches the increment statement of a for loop. /// /// Example: /// forStmt(hasIncrement(unaryOperator(hasOperatorName("++")))) /// matches '++x' in /// \code /// for (x; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Increment = Node.getInc(); return (Increment != nullptr && InnerMatcher.matches(*Increment, Finder, Builder)); } /// Matches the initialization statement of a for loop. /// /// Example: /// forStmt(hasLoopInit(declStmt())) /// matches 'int x = 0' in /// \code /// for (int x = 0; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Init = Node.getInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches range-based for statements. /// /// cxxForRangeStmt() matches 'for (auto a : i)' /// \code /// int i[] = {1, 2, 3}; for (auto a : i); /// for(int j = 0; j < 5; ++j); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt> cxxForRangeStmt; /// Matches the initialization statement of a for loop. 
/// /// Example: /// forStmt(hasLoopVariable(anything())) /// matches 'int x' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>, InnerMatcher) { const VarDecl *const Var = Node.getLoopVariable(); return (Var != nullptr && InnerMatcher.matches(*Var, Finder, Builder)); } /// Matches the range initialization statement of a for loop. /// /// Example: /// forStmt(hasRangeInit(anything())) /// matches 'a' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>, InnerMatcher) { const Expr *const Init = Node.getRangeInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches while statements. /// /// Given /// \code /// while (true) {} /// \endcode /// whileStmt() /// matches 'while (true) {}'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt; /// Matches do statements. /// /// Given /// \code /// do {} while (true); /// \endcode /// doStmt() /// matches 'do {} while(true)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt; /// Matches break statements. /// /// Given /// \code /// while (true) { break; } /// \endcode /// breakStmt() /// matches 'break' extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt; /// Matches continue statements. /// /// Given /// \code /// while (true) { continue; } /// \endcode /// continueStmt() /// matches 'continue' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt> continueStmt; /// Matches return statements. /// /// Given /// \code /// return 1; /// \endcode /// returnStmt() /// matches 'return 1' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt; /// Matches goto statements. 
/// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// gotoStmt() /// matches 'goto FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt; /// Matches label statements. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelStmt() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt; /// Matches address of label statements (GNU extension). /// /// Given /// \code /// FOO: bar(); /// void *ptr = &&FOO; /// goto *bar; /// \endcode /// addrLabelExpr() /// matches '&&FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr> addrLabelExpr; /// Matches switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchStmt() /// matches 'switch(a)'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt; /// Matches case and default statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchCase() /// matches 'case 42:' and 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase; /// Matches case statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// caseStmt() /// matches 'case 42:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt; /// Matches default statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// defaultStmt() /// matches 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt> defaultStmt; /// Matches compound statements. /// /// Example matches '{}' and '{{}}' in 'for (;;) {{}}' /// \code /// for (;;) {{}} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt> compoundStmt; /// Matches catch statements. 
/// /// \code /// try {} catch(int i) {} /// \endcode /// cxxCatchStmt() /// matches 'catch(int i)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt> cxxCatchStmt; /// Matches try statements. /// /// \code /// try {} catch(int i) {} /// \endcode /// cxxTryStmt() /// matches 'try {}' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt; /// Matches throw expressions. /// /// \code /// try { throw 5; } catch(int i) {} /// \endcode /// cxxThrowExpr() /// matches 'throw 5' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr> cxxThrowExpr; /// Matches null statements. /// /// \code /// foo();; /// \endcode /// nullStmt() /// matches the second ';' extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt; /// Matches asm statements. /// /// \code /// int i = 100; /// __asm("mov al, 2"); /// \endcode /// asmStmt() /// matches '__asm("mov al, 2")' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt; /// Matches bool literals. /// /// Example matches true /// \code /// true /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr> cxxBoolLiteral; /// Matches string literals (also matches wide string literals). /// /// Example matches "abcd", L"abcd" /// \code /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral> stringLiteral; /// Matches character literals (also matches wchar_t). /// /// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral), /// though. /// /// Example matches 'a', L'a' /// \code /// char ch = 'a'; /// wchar_t chw = L'a'; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral> characterLiteral; /// Matches integer literals of all sizes / encodings, e.g. /// 1, 1L, 0x1 and 1U. /// /// Does not match character-encoded integers such as L'a'. 
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral> integerLiteral; /// Matches float literals of all sizes / encodings, e.g. /// 1.0, 1.0f, 1.0L and 1e10. /// /// Does not match implicit conversions such as /// \code /// float a = 10; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral> floatLiteral; /// Matches imaginary literals, which are based on integer and floating /// point literals e.g.: 1i, 1.0i extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral> imaginaryLiteral; /// Matches user defined literal operator call. /// /// Example match: "foo"_suffix extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral> userDefinedLiteral; /// Matches compound (i.e. non-scalar) literals /// /// Example match: {1}, (1, 2) /// \code /// int array[4] = {1}; /// vector int myvec = (vector int)(1, 2); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr> compoundLiteralExpr; /// Matches nullptr literal. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr> cxxNullPtrLiteralExpr; /// Matches GNU __builtin_choose_expr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr> chooseExpr; /// Matches GNU __null expression. extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr> gnuNullExpr; /// Matches atomic builtins. /// Example matches __atomic_load_n(ptr, 1) /// \code /// void foo() { int *ptr; __atomic_load_n(ptr, 1); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr; /// Matches statement expression (GNU extension). /// /// Example match: ({ int X = 4; X; }) /// \code /// int C = ({ int X = 4; X; }); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr; /// Matches binary operator expressions. 
/// /// Example matches a || b /// \code /// !(a || b) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator> binaryOperator; /// Matches unary operator expressions. /// /// Example matches !a /// \code /// !a || b /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator> unaryOperator; /// Matches conditional operator expressions. /// /// Example matches a ? b : c /// \code /// (a ? b : c) + 42 /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator> conditionalOperator; /// Matches binary conditional operator expressions (GNU extension). /// /// Example matches a ?: b /// \code /// (a ?: b) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryConditionalOperator> binaryConditionalOperator; /// Matches opaque value expressions. They are used as helpers /// to reference another expressions and can be met /// in BinaryConditionalOperators, for example. /// /// Example matches 'a' /// \code /// (a ?: c) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr> opaqueValueExpr; /// Matches a C++ static_assert declaration. /// /// Example: /// staticAssertExpr() /// matches /// static_assert(sizeof(S) == sizeof(int)) /// in /// \code /// struct S { /// int x; /// }; /// static_assert(sizeof(S) == sizeof(int)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl> staticAssertDecl; /// Matches a reinterpret_cast expression. /// /// Either the source expression or the destination type can be matched /// using has(), but hasDestinationType() is more specific and can be /// more readable. /// /// Example matches reinterpret_cast<char*>(&p) in /// \code /// void* p = reinterpret_cast<char*>(&p); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr> cxxReinterpretCastExpr; /// Matches a C++ static_cast expression. 
/// /// \see hasDestinationType /// \see reinterpretCast /// /// Example: /// cxxStaticCastExpr() /// matches /// static_cast<long>(8) /// in /// \code /// long eight(static_cast<long>(8)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr> cxxStaticCastExpr; /// Matches a dynamic_cast expression. /// /// Example: /// cxxDynamicCastExpr() /// matches /// dynamic_cast<D*>(&b); /// in /// \code /// struct B { virtual ~B() {} }; struct D : B {}; /// B b; /// D* p = dynamic_cast<D*>(&b); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr> cxxDynamicCastExpr; /// Matches a const_cast expression. /// /// Example: Matches const_cast<int*>(&r) in /// \code /// int n = 42; /// const int &r(n); /// int* p = const_cast<int*>(&r); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr> cxxConstCastExpr; /// Matches a C-style cast expression. /// /// Example: Matches (int) 2.2f in /// \code /// int i = (int) 2.2f; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr> cStyleCastExpr; /// Matches explicit cast expressions. /// /// Matches any cast expression written in user code, whether it be a /// C-style cast, a functional-style cast, or a keyword cast. /// /// Does not match implicit conversions. /// /// Note: the name "explicitCast" is chosen to match Clang's terminology, as /// Clang uses the term "cast" to apply to implicit conversions as well as to /// actual cast expressions. /// /// \see hasDestinationType. /// /// Example: matches all five of the casts in /// \code /// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42))))) /// \endcode /// but does not match the implicit conversion in /// \code /// long ell = 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr> explicitCastExpr; /// Matches the implicit cast nodes of Clang's AST. 
/// /// This matches many different places, including function call return value /// eliding, as well as any type conversions. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr> implicitCastExpr; /// Matches any cast nodes of Clang's AST. /// /// Example: castExpr() matches each of the following: /// \code /// (int) 3; /// const_cast<Expr *>(SubExpr); /// char c = 0; /// \endcode /// but does not match /// \code /// int i = (0); /// int k = 0; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr; /// Matches functional cast expressions /// /// Example: Matches Foo(bar); /// \code /// Foo f = bar; /// Foo g = (Foo) bar; /// Foo h = Foo(bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr> cxxFunctionalCastExpr; /// Matches functional cast expressions having N != 1 arguments /// /// Example: Matches Foo(bar, bar) /// \code /// Foo h = Foo(bar, bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr> cxxTemporaryObjectExpr; /// Matches predefined identifier expressions [C99 6.4.2.2]. /// /// Example: Matches __func__ /// \code /// printf("%s", __func__); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr> predefinedExpr; /// Matches C99 designated initializer expressions [C99 6.7.8]. /// /// Example: Matches { [2].y = 1.0, [0].x = 1.0 } /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr> designatedInitExpr; /// Matches designated initializer expressions that contain /// a specific number of designators. 
/// /// Example: Given /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }; /// \endcode /// designatorCountIs(2) /// matches '{ [2].y = 1.0, [0].x = 1.0 }', /// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'. AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) { return Node.size() == N; } /// Matches \c QualTypes in the clang AST. extern const internal::VariadicAllOfMatcher<QualType> qualType; /// Matches \c Types in the clang AST. extern const internal::VariadicAllOfMatcher<Type> type; /// Matches \c TypeLocs in the clang AST. extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc; /// Matches if any of the given matchers matches. /// /// Unlike \c anyOf, \c eachOf will generate a match result for each /// matching submatcher. /// /// For example, in: /// \code /// class A { int a; int b; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")), /// has(fieldDecl(hasName("b")).bind("v")))) /// \endcode /// will generate two results binding "v", the first of which binds /// the field declaration of \c a, the second the field declaration of /// \c b. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> eachOf; /// Matches if any of the given matchers matches. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> anyOf; /// Matches if all given matchers match. 
/// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> allOf; /// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL) /// /// Given /// \code /// Foo x = bar; /// int y = sizeof(x) + alignof(x); /// \endcode /// unaryExprOrTypeTraitExpr() /// matches \c sizeof(x) and \c alignof(x) extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryExprOrTypeTraitExpr> unaryExprOrTypeTraitExpr; /// Matches unary expressions that have a specific type of argument. /// /// Given /// \code /// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c); /// \endcode /// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")) /// matches \c sizeof(a) and \c alignof(c) AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType, internal::Matcher<QualType>, InnerMatcher) { const QualType ArgumentType = Node.getTypeOfArgument(); return InnerMatcher.matches(ArgumentType, Finder, Builder); } /// Matches unary expressions of a certain kind. /// /// Given /// \code /// int x; /// int s = sizeof(x) + alignof(x) /// \endcode /// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf)) /// matches \c sizeof(x) /// /// If the matcher is use from clang-query, UnaryExprOrTypeTrait parameter /// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf"). AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) { return Node.getKind() == Kind; } /// Same as unaryExprOrTypeTraitExpr, but only matching /// alignof. inline internal::Matcher<Stmt> alignOfExpr( const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) { return stmt(unaryExprOrTypeTraitExpr( allOf(anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf)), InnerMatcher))); } /// Same as unaryExprOrTypeTraitExpr, but only matching /// sizeof. 
inline internal::Matcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  return stmt(unaryExprOrTypeTraitExpr(
      allOf(ofKind(UETT_SizeOf), InnerMatcher)));
}

/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
///   namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(const std::string &Name) {
  return internal::Matcher<NamedDecl>(new internal::HasNameMatcher({Name}));
}

/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
///     hasAnyName(a, b, c)
/// \endcode
/// is equivalent to, but faster than
/// \code
///     anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef,
                                        internal::hasAnyNameFunc>
    hasAnyName;

/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
///   namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_P(NamedDecl, matchesName, std::string, RegExp) {
  assert(!RegExp.empty());
  // Prepend "::" so anchored patterns like "::X" can match top-level names.
  std::string FullNameString = "::" + Node.getQualifiedNameAsString();
  llvm::Regex RE(RegExp);
  return RE.match(FullNameString);
}

/// Matches overloaded operator names.
/// /// Matches overloaded operator names specified in strings without the /// "operator" prefix: e.g. "<<". /// /// Given: /// \code /// class A { int operator*(); }; /// const A &operator<<(const A &a, const A &b); /// A a; /// a << a; // <-- This matches /// \endcode /// /// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the /// specified line and /// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*"))) /// matches the declaration of \c A. /// /// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl> inline internal::PolymorphicMatcherWithParam1< internal::HasOverloadedOperatorNameMatcher, StringRef, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)> hasOverloadedOperatorName(StringRef Name) { return internal::PolymorphicMatcherWithParam1< internal::HasOverloadedOperatorNameMatcher, StringRef, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>(Name); } /// Matches C++ classes that are directly or indirectly derived from a class /// matching \c Base, or Objective-C classes that directly or indirectly /// subclass a class matching \c Base. /// /// Note that a class is not considered to be derived from itself. 
/// /// Example matches Y, Z, C (Base == hasName("X")) /// \code /// class X; /// class Y : public X {}; // directly derived /// class Z : public Y {}; // indirectly derived /// typedef X A; /// typedef A B; /// class C : public B {}; // derived from a typedef of X /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("X")): /// \code /// class Foo; /// typedef Foo X; /// class Bar : public Foo {}; // derived from a type that X is a typedef of /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("NSObject")) /// \code /// @interface NSObject @end /// @interface Bar : NSObject @end /// \endcode /// /// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl> AST_POLYMORPHIC_MATCHER_P( isDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base) { // Check if the node is a C++ struct/union/class. if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/false); // The node must be an Objective-C class. const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder, /*Directly=*/false); } /// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)). AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Similar to \c isDerivedFrom(), but also matches classes that directly /// match \c Base. 
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // Either the class itself matches Base, or one of its bases does.
  const auto M = anyOf(Base, isDerivedFrom(Base));

  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);

  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}

/// Overloaded method as shortcut for
/// \c isSameOrDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  if (BaseName.empty())
    return false;

  const auto M = isSameOrDerivedFrom(hasName(BaseName));

  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);

  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}

/// Matches C++ or Objective-C classes that are directly derived from a class
/// matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, C (Base == hasName("X"))
/// \code
///   class X;
///   class Y : public X {};  // directly derived
///   class Z : public Y {};  // indirectly derived
///   typedef X A;
///   typedef A B;
///   class C : public B {};  // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
///   class Foo;
///   typedef Foo X;
///   class Bar : public Foo {};  // derived from a type that X is a typedef of
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // Check if the node is a C++ struct/union/class.
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/true);

  // The node must be an Objective-C class.
  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder,
                                        /*Directly=*/true);
}

/// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  if (BaseName.empty())
    return false;
  const auto M = isDirectlyDerivedFrom(hasName(BaseName));
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);
  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}

/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
///   class A { void func(); };
///   class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
              InnerMatcher) {
  return matchesFirstInPointerRange(InnerMatcher, Node.method_begin(),
                                    Node.method_end(), Finder, Builder);
}

/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
///   auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
  return Node.isLambda();
}

/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
///   (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))))
/// \code
///   class X {};  // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; }; /// class Z { class Y { class X {}; }; }; // Does not match Z. /// \endcode /// /// ChildT must be an AST base type. /// /// Usable as: Any Matcher /// Note that has is direct matcher, so it also matches things like implicit /// casts and paren casts. If you are matching with expr then you should /// probably consider using ignoringParenImpCasts like: /// has(ignoringParenImpCasts(expr())). extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Z /// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; // Matches X, because X::X is a class of name X inside X. /// class Y { class X {}; }; /// class Z { class Y { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasDescendantMatcher> hasDescendant; /// Matches AST nodes that have child AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Y::X, Z::Y, Z::Y::X /// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X"))) /// \code /// class X {}; /// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X /// // inside Y. /// class Z { class Y { class X {}; }; }; // Does not match Z. /// \endcode /// /// ChildT must be an AST base type. /// /// As opposed to 'has', 'forEach' will cause a match for each result that /// matches instead of only on the first one. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher> forEach; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. 
/// /// Example matches X, A, A::X, B, B::C, B::C::X /// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; /// class A { class X {}; }; // Matches A, because A::X is a class of name /// // X inside A. /// class B { class C { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for /// each result that matches instead of only on the first one. /// /// Note: Recursively combined ForEachDescendant can cause many matches: /// cxxRecordDecl(forEachDescendant(cxxRecordDecl( /// forEachDescendant(cxxRecordDecl()) /// ))) /// will match 10 times (plus injected class name matches) on: /// \code /// class A { class B { class C { class D { class E {}; }; }; }; }; /// \endcode /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::ForEachDescendantMatcher> forEachDescendant; /// Matches if the node or any descendant matches. /// /// Generates results for each match. /// /// For example, in: /// \code /// class A { class B {}; class C {}; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(hasName("::A"), /// findAll(cxxRecordDecl(isDefinition()).bind("m"))) /// \endcode /// will generate results for \c A, \c B and \c C. /// /// Usable as: Any Matcher template <typename T> internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) { return eachOf(Matcher, forEachDescendant(Matcher)); } /// Matches AST nodes that have a parent that matches the provided /// matcher. /// /// Given /// \code /// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } } /// \endcode /// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }". 
/// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasParentMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasParent; /// Matches AST nodes that have an ancestor that matches the provided /// matcher. /// /// Given /// \code /// void f() { if (true) { int x = 42; } } /// void g() { for (;;) { int x = 43; } } /// \endcode /// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasAncestorMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasAncestor; /// Matches if the provided matcher does not match. /// /// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X")))) /// \code /// class X {}; /// class Y {}; /// \endcode /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc<1, 1> unless; /// Matches a node if the declaration associated with that node /// matches the given matcher. /// /// The associated declaration is: /// - for type nodes, the declaration of the underlying type /// - for CallExpr, the declaration of the callee /// - for MemberExpr, the declaration of the referenced member /// - for CXXConstructExpr, the declaration of the constructor /// - for CXXNewExpr, the declaration of the operator new /// - for ObjCIvarExpr, the declaration of the ivar /// /// For type nodes, hasDeclaration will generally match the declaration of the /// sugared type. Given /// \code /// class X {}; /// typedef X Y; /// Y y; /// \endcode /// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the /// typedefDecl. A common use case is to match the underlying, desugared type. 
/// This can be achieved by using the hasUnqualifiedDesugaredType matcher: /// \code /// varDecl(hasType(hasUnqualifiedDesugaredType( /// recordType(hasDeclaration(decl()))))) /// \endcode /// In this matcher, the decl will match the CXXRecordDecl of class X. /// /// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>, /// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>, /// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>, /// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>, /// Matcher<TagType>, Matcher<TemplateSpecializationType>, /// Matcher<TemplateTypeParmType>, Matcher<TypedefType>, /// Matcher<UnresolvedUsingType> inline internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher, internal::Matcher<Decl>, void(internal::HasDeclarationSupportedTypes)> hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) { return internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher, internal::Matcher<Decl>, void(internal::HasDeclarationSupportedTypes)>(InnerMatcher); } /// Matches a \c NamedDecl whose underlying declaration matches the given /// matcher. /// /// Given /// \code /// namespace N { template<class T> void f(T t); } /// template <class T> void g() { using N::f; f(T()); } /// \endcode /// \c unresolvedLookupExpr(hasAnyDeclaration( /// namedDecl(hasUnderlyingDecl(hasName("::N::f"))))) /// matches the use of \c f in \c g() . AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>, InnerMatcher) { const NamedDecl *UnderlyingDecl = Node.getUnderlyingDecl(); return UnderlyingDecl != nullptr && InnerMatcher.matches(*UnderlyingDecl, Finder, Builder); } /// Matches on the implicit object argument of a member call expression, after /// stripping off any parentheses or implicit casts. 
/// /// Given /// \code /// class Y { public: void m(); }; /// Y g(); /// class X : public Y {}; /// void z(Y y, X x) { y.m(); (g()).m(); x.m(); } /// \endcode /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y"))))) /// matches `y.m()` and `(g()).m()`. /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X"))))) /// matches `x.m()`. /// cxxMemberCallExpr(on(callExpr())) /// matches `(g()).m()`. /// /// FIXME: Overload to allow directly matching types? AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>, InnerMatcher) { const Expr *ExprNode = Node.getImplicitObjectArgument() ->IgnoreParenImpCasts(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches on the receiver of an ObjectiveC Message expression. /// /// Example /// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *"))); /// matches the [webView ...] message invocation. /// \code /// NSString *webViewJavaScript = ... /// UIWebView *webView = ... /// [webView stringByEvaluatingJavaScriptFromString:webViewJavascript]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>, InnerMatcher) { const QualType TypeDecl = Node.getReceiverType(); return InnerMatcher.matches(TypeDecl, Finder, Builder); } /// Returns true when the Objective-C method declaration is a class method. /// /// Example /// matcher = objcMethodDecl(isClassMethod()) /// matches /// \code /// @interface I + (void)foo; @end /// \endcode /// but not /// \code /// @interface I - (void)bar; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isClassMethod) { return Node.isClassMethod(); } /// Returns true when the Objective-C method declaration is an instance method. 
/// /// Example /// matcher = objcMethodDecl(isInstanceMethod()) /// matches /// \code /// @interface I - (void)bar; @end /// \endcode /// but not /// \code /// @interface I + (void)foo; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isInstanceMethod) { return Node.isInstanceMethod(); } /// Returns true when the Objective-C message is sent to a class. /// /// Example /// matcher = objcMessageExpr(isClassMessage()) /// matches /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode /// but not /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isClassMessage) { return Node.isClassMessage(); } /// Returns true when the Objective-C message is sent to an instance. /// /// Example /// matcher = objcMessageExpr(isInstanceMessage()) /// matches /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// but not /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isInstanceMessage) { return Node.isInstanceMessage(); } /// Matches if the Objective-C message is sent to an instance, /// and the inner matcher matches on that instance. /// /// For example the method call in /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// is matched by /// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x")))))) AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>, InnerMatcher) { const Expr *ReceiverNode = Node.getInstanceReceiver(); return (ReceiverNode != nullptr && InnerMatcher.matches(*ReceiverNode->IgnoreParenImpCasts(), Finder, Builder)); } /// Matches when BaseName == Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) { Selector Sel = Node.getSelector(); return BaseName.compare(Sel.getAsString()) == 0; } /// Matches when at least one of the supplied string equals to the /// Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:")); /// matches both of the expressions below: /// \code /// [myObj methodA:argA]; /// [myObj methodB:argB]; /// \endcode extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>, StringRef, internal::hasAnySelectorFunc> hasAnySelector; /// Matches ObjC selectors whose name contains /// a substring matched by the given RegExp. /// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, matchesSelector, std::string, RegExp) { assert(!RegExp.empty()); std::string SelectorString = Node.getSelector().getAsString(); llvm::Regex RE(RegExp); return RE.match(SelectorString); } /// Matches when the selector is the empty selector /// /// Matches only when the selector of the objCMessageExpr is NULL. This may /// represent an error condition in the tree! AST_MATCHER(ObjCMessageExpr, hasNullSelector) { return Node.getSelector().isNull(); } /// Matches when the selector is a Unary Selector /// /// matcher = objCMessageExpr(matchesSelector(hasUnarySelector()); /// matches self.bodyView in the code below, but NOT the outer message /// invocation of "loadHTMLString:baseURL:". 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER(ObjCMessageExpr, hasUnarySelector) { return Node.getSelector().isUnarySelector(); } /// Matches when the selector is a keyword selector /// /// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame /// message expression in /// /// \code /// UIWebView *webView = ...; /// CGRect bodyFrame = webView.frame; /// bodyFrame.size.height = self.bodyContentHeight; /// webView.frame = bodyFrame; /// // ^---- matches here /// \endcode AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) { return Node.getSelector().isKeywordSelector(); } /// Matches when the selector has the specified number of arguments /// /// matcher = objCMessageExpr(numSelectorArgs(0)); /// matches self.bodyView in the code below /// /// matcher = objCMessageExpr(numSelectorArgs(2)); /// matches the invocation of "loadHTMLString:baseURL:" but not that /// of self.bodyView /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) { return Node.getSelector().getNumArgs() == N; } /// Matches if the call expression's callee expression matches. /// /// Given /// \code /// class Y { void x() { this->x(); x(); Y y; y.x(); } }; /// void f() { f(); } /// \endcode /// callExpr(callee(expr())) /// matches this->x(), x(), y.x(), f() /// with callee(...) /// matching this->x, x, y.x, f respectively /// /// Note: Callee cannot take the more general internal::Matcher<Expr> /// because this introduces ambiguous overloads with calls to Callee taking a /// internal::Matcher<Decl>, as the matcher hierarchy is purely /// implemented in terms of implicit casts. AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>, InnerMatcher) { const Expr *ExprNode = Node.getCallee(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches if the call expression's callee's declaration matches the /// given matcher. 
/// /// Example matches y.x() (matcher = callExpr(callee( /// cxxMethodDecl(hasName("x"))))) /// \code /// class Y { public: void x(); }; /// void z() { Y y; y.x(); } /// \endcode AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher, 1) { return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder); } /// Matches if the expression's or declaration's type matches a type /// matcher. /// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and U (matcher = typedefDecl(hasType(asString("int"))) /// and friend class X (matcher = friendDecl(hasType("X")) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// typedef int U; /// class Y { friend class X; }; /// \endcode AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl, ValueDecl), internal::Matcher<QualType>, InnerMatcher, 0) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return InnerMatcher.matches(QT, Finder, Builder); return false; } /// Overloaded to match the declaration of the expression's or value /// declaration's type. /// /// In case of a value declaration (for example a variable declaration), /// this resolves one layer of indirection. For example, in the value /// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of /// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the /// declaration of x. 
/// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and friend class X (matcher = friendDecl(hasType("X")) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// class Y { friend class X; }; /// \endcode /// /// Usable as: Matcher<Expr>, Matcher<ValueDecl> AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl), internal::Matcher<Decl>, InnerMatcher, 1) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder); return false; } /// Matches if the type location of the declarator decl's type matches /// the inner matcher. /// /// Given /// \code /// int x; /// \endcode /// declaratorDecl(hasTypeLoc(loc(asString("int")))) /// matches int x AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) { if (!Node.getTypeSourceInfo()) // This happens for example for implicit destructors. return false; return Inner.matches(Node.getTypeSourceInfo()->getTypeLoc(), Finder, Builder); } /// Matches if the matched type is represented by the given string. /// /// Given /// \code /// class Y { public: void x(); }; /// void z() { Y* y; y->x(); } /// \endcode /// cxxMemberCallExpr(on(hasType(asString("class Y *")))) /// matches y->x() AST_MATCHER_P(QualType, asString, std::string, Name) { return Name == Node.getAsString(); } /// Matches if the matched type is a pointer type and the pointee type /// matches the specified matcher. 
///
/// Example matches y->x()
///   (matcher = cxxMemberCallExpr(on(hasType(pointsTo
///      cxxRecordDecl(hasName("Y")))))))
/// \code
///   class Y { public: void x(); };
///   void z() { Y *y; y->x(); }
/// \endcode
AST_MATCHER_P(
    QualType, pointsTo, internal::Matcher<QualType>,
    InnerMatcher) {
  return (!Node.isNull() && Node->isAnyPointerType() &&
          InnerMatcher.matches(Node->getPointeeType(), Finder, Builder));
}

/// Overloaded to match the pointee type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  return pointsTo(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}

/// Matches if the matched type matches the unqualified desugared
/// type of the matched node.
///
/// For example, in:
/// \code
///   class A {};
///   using B = A;
/// \endcode
/// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches
/// both B and A.
AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>,
              InnerMatcher) {
  return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder,
                              Builder);
}

/// Matches if the matched type is a reference type and the referenced
/// type matches the specified matcher.
///
/// Example matches X &x and const X &y
///     (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X"))))))
/// \code
///   class X {
///     void a(X b) {
///       X &x = b;
///       const X &y = b;
///     }
///   };
/// \endcode
AST_MATCHER_P(QualType, references, internal::Matcher<QualType>,
              InnerMatcher) {
  // getPointeeType() also yields the referenced type for reference types.
  return (!Node.isNull() && Node->isReferenceType() &&
          InnerMatcher.matches(Node->getPointeeType(), Finder, Builder));
}

/// Matches QualTypes whose canonical type matches InnerMatcher.
///
/// Given:
/// \code
///   typedef int &int_ref;
///   int a;
///   int_ref b = a;
/// \endcode
///
/// \c varDecl(hasType(qualType(referenceType()))))) will not match the
/// declaration of b but \c
/// varDecl(hasType(qualType(hasCanonicalType(referenceType())))))) does.
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  if (Node.isNull())
    return false;
  return InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}

/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  return references(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}

/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   Y g();
///   class X : public Y { void g(); };
///   void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`.
/// cxxMemberCallExpr(on(callExpr()))
///   does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *ExprNode = Node.getImplicitObjectArgument();
  return (ExprNode != nullptr &&
          InnerMatcher.matches(*ExprNode, Finder, Builder));
}

/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   class X : public Y { void g(); };
///   void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
///     cxxRecordDecl(hasName("X")))))
///   matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<QualType>, InnerMatcher, 0) {
  // Accept both "obj.m()" (object type matches) and "ptr->m()" (pointer to a
  // matching type).
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}

/// Overloaded to match the type's declaration.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<Decl>, InnerMatcher, 1) {
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}

/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
///     (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
///   bool x;
///   if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>,
              InnerMatcher) {
  const Decl *DeclNode = Node.getDecl();
  return (DeclNode != nullptr &&
          InnerMatcher.matches(*DeclNode, Finder, Builder));
}

/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
///   namespace a { void f() {} }
///   using a::f;
///   void g() {
///     f();     // Matches this ..
///     a::f();  // .. but not this.
///   }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
///   matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Only references found via a using-declaration have a UsingShadowDecl as
  // their found declaration.
  const NamedDecl *FoundDecl = Node.getFoundDecl();
  if (const UsingShadowDecl *UsingDecl = dyn_cast<UsingShadowDecl>(FoundDecl))
    return InnerMatcher.matches(*UsingDecl, Finder, Builder);
  return false;
}

/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
///
/// Given
/// \code
///   template <typename T> void foo(T);
///   template <typename T> void bar(T);
///   template <typename T> void baz(T t) {
///     foo(t);
///     bar(t);
///   }
/// \endcode
/// unresolvedLookupExpr(hasAnyDeclaration(
///     functionTemplateDecl(hasName("foo"))))
///   matches \c foo in \c foo(t); but not \c bar in \c bar(t);
AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>,
              InnerMatcher) {
  return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(),
                                    Node.decls_end(), Finder, Builder);
}

/// Matches the Decl of a DeclStmt which has a single declaration.
///
/// Given
/// \code
///   int a, b;
///   int c;
/// \endcode
/// declStmt(hasSingleDecl(anything()))
///   matches 'int c;' but not 'int a, b;'.
AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) {
  if (Node.isSingleDecl()) {
    const Decl *FoundDecl = Node.getSingleDecl();
    return InnerMatcher.matches(*FoundDecl, Finder, Builder);
  }
  return false;
}

/// Matches a variable declaration that has an initializer expression
/// that matches the given matcher.
///
/// Example matches x (matcher = varDecl(hasInitializer(callExpr())))
/// \code
///   bool y() { return true; }
///   bool x = y();
/// \endcode
AST_MATCHER_P(
    VarDecl, hasInitializer, internal::Matcher<Expr>,
    InnerMatcher) {
  // getAnyInitializer() finds the initializer on any redeclaration.
  const Expr *Initializer = Node.getAnyInitializer();
  return (Initializer != nullptr &&
          InnerMatcher.matches(*Initializer, Finder, Builder));
}

/// \brief Matches a static variable with local scope.
///
/// Example matches y (matcher = varDecl(isStaticLocal()))
/// \code
/// void f() {
///   int x;
///   static int y;
/// }
/// static int z;
/// \endcode
AST_MATCHER(VarDecl, isStaticLocal) {
  return Node.isStaticLocal();
}

/// Matches a variable declaration that has function scope and is a
/// non-static local variable.
/// /// Example matches x (matcher = varDecl(hasLocalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasLocalStorage) { return Node.hasLocalStorage(); } /// Matches a variable declaration that does not have local storage. /// /// Example matches y and z (matcher = varDecl(hasGlobalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasGlobalStorage) { return Node.hasGlobalStorage(); } /// Matches a variable declaration that has automatic storage duration. /// /// Example matches x, but not y, z, or a. /// (matcher = varDecl(hasAutomaticStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasAutomaticStorageDuration) { return Node.getStorageDuration() == SD_Automatic; } /// Matches a variable declaration that has static storage duration. /// It includes the variable declared at namespace scope and those declared /// with "static" and "extern" storage class specifiers. /// /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// static int b; /// extern int c; /// varDecl(hasStaticStorageDuration()) /// matches the function declaration y, a, b and c. /// \endcode AST_MATCHER(VarDecl, hasStaticStorageDuration) { return Node.getStorageDuration() == SD_Static; } /// Matches a variable declaration that has thread storage duration. /// /// Example matches z, but not x, z, or a. /// (matcher = varDecl(hasThreadStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasThreadStorageDuration) { return Node.getStorageDuration() == SD_Thread; } /// Matches a variable declaration that is an exception variable from /// a C++ catch block, or an Objective-C \@catch statement. 
/// /// Example matches x (matcher = varDecl(isExceptionVariable()) /// \code /// void f(int y) { /// try { /// } catch (int x) { /// } /// } /// \endcode AST_MATCHER(VarDecl, isExceptionVariable) { return Node.isExceptionVariable(); } /// Checks that a call expression or a constructor call expression has /// a specific number of arguments (including absent default arguments). /// /// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2))) /// \code /// void f(int x, int y); /// f(0, 0); /// \endcode AST_POLYMORPHIC_MATCHER_P(argumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr, ObjCMessageExpr), unsigned, N) { return Node.getNumArgs() == N; } /// Matches the n'th argument of a call expression or a constructor /// call expression. /// /// Example matches y in x(y) /// (matcher = callExpr(hasArgument(0, declRefExpr()))) /// \code /// void x(int) { int y; x(y); } /// \endcode AST_POLYMORPHIC_MATCHER_P2(hasArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr, ObjCMessageExpr), unsigned, N, internal::Matcher<Expr>, InnerMatcher) { return (N < Node.getNumArgs() && InnerMatcher.matches( *Node.getArg(N)->IgnoreParenImpCasts(), Finder, Builder)); } /// Matches the n'th item of an initializer list expression. /// /// Example matches y. /// (matcher = initListExpr(hasInit(0, expr()))) /// \code /// int x{y}. /// \endcode AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { return N < Node.getNumInits() && InnerMatcher.matches(*Node.getInit(N), Finder, Builder); } /// Matches declaration statements that contain a specific number of /// declarations. /// /// Example: Given /// \code /// int a, b; /// int c; /// int d = 2, e; /// \endcode /// declCountIs(2) /// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'. 
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
  // DeclStmt exposes its declarations only as an iterator range.
  return std::distance(Node.decl_begin(), Node.decl_end()) == (ptrdiff_t)N;
}

/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
///   int a, b = 0;
///   int c;
///   int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
///       0, varDecl(hasInitializer(anything()))))
///   matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
/// \code
///   matches 'int a, b = 0' as well as 'int d = 2, e;'
///   but 'int c;' is not matched.
/// \endcode
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
               internal::Matcher<Decl>, InnerMatcher) {
  const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end());
  if (N >= NumDecls)
    return false;
  DeclStmt::const_decl_iterator Iterator = Node.decl_begin();
  std::advance(Iterator, N);
  return InnerMatcher.matches(**Iterator, Finder, Builder);
}

/// Matches a C++ catch statement that has a catch-all handler.
///
/// Given
/// \code
///   try {
///     // ...
///   } catch (int) {
///     // ...
///   } catch (...) {
///     // ...
///   }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
  // catch(...) has no exception declaration.
  return Node.getExceptionDecl() == nullptr;
}

/// Matches a constructor initializer.
///
/// Given
/// \code
///   struct Foo {
///     Foo() : foo_(1) { }
///     int foo_;
///   };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(
///   hasAnyConstructorInitializer(anything())
/// )))
///   record matches Foo, hasAnyConstructorInitializer matches foo_(1)
AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  return matchesFirstInPointerRange(InnerMatcher, Node.init_begin(),
                                    Node.init_end(), Finder, Builder);
}

/// Matches the field declaration of a constructor initializer.
///
/// Given
/// \code
///   struct Foo {
///     Foo() : foo_(1) { }
///     int foo_;
///   };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
///     forField(hasName("foo_"))))))
///   matches Foo
/// with forField matching foo_
AST_MATCHER_P(CXXCtorInitializer, forField,
              internal::Matcher<FieldDecl>, InnerMatcher) {
  // getAnyMember() is null for base-class initializers, so those never match.
  const FieldDecl *NodeAsDecl = Node.getAnyMember();
  return (NodeAsDecl != nullptr &&
          InnerMatcher.matches(*NodeAsDecl, Finder, Builder));
}

/// Matches the initializer expression of a constructor initializer.
///
/// Given
/// \code
///   struct Foo {
///     Foo() : foo_(1) { }
///     int foo_;
///   };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
///     withInitializer(integerLiteral(equals(1)))))))
///   matches Foo
/// with withInitializer matching (1)
AST_MATCHER_P(CXXCtorInitializer, withInitializer,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr* NodeAsExpr = Node.getInit();
  return (NodeAsExpr != nullptr &&
          InnerMatcher.matches(*NodeAsExpr, Finder, Builder));
}

/// Matches a constructor initializer if it is explicitly written in
/// code (as opposed to implicitly added by the compiler).
/// /// Given /// \code /// struct Foo { /// Foo() { } /// Foo(int) : foo_("A") { } /// string foo_; /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten())) /// will match Foo(int), but not Foo() AST_MATCHER(CXXCtorInitializer, isWritten) { return Node.isWritten(); } /// Matches a constructor initializer if it is initializing a base, as /// opposed to a member. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer())) /// will match E(), but not match D(int). AST_MATCHER(CXXCtorInitializer, isBaseInitializer) { return Node.isBaseInitializer(); } /// Matches a constructor initializer if it is initializing a member, as /// opposed to a base. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer())) /// will match D(int), but not match E(). AST_MATCHER(CXXCtorInitializer, isMemberInitializer) { return Node.isMemberInitializer(); } /// Matches any argument of a call expression or a constructor call /// expression, or an ObjC-message-send expression. /// /// Given /// \code /// void x(int, int, int) { int y; x(1, y, 42); } /// \endcode /// callExpr(hasAnyArgument(declRefExpr())) /// matches x(1, y, 42) /// with hasAnyArgument(...) 
/// matching y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// void foo(I *i) { [i f:12]; } /// \endcode /// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12)))) /// matches [i f:12] AST_POLYMORPHIC_MATCHER_P(hasAnyArgument, AST_POLYMORPHIC_SUPPORTED_TYPES( CallExpr, CXXConstructExpr, CXXUnresolvedConstructExpr, ObjCMessageExpr), internal::Matcher<Expr>, InnerMatcher) { for (const Expr *Arg : Node.arguments()) { BoundNodesTreeBuilder Result(*Builder); if (InnerMatcher.matches(*Arg, Finder, &Result)) { *Builder = std::move(Result); return true; } } return false; } /// Matches a constructor call expression which uses list initialization. AST_MATCHER(CXXConstructExpr, isListInitialization) { return Node.isListInitialization(); } /// Matches a constructor call expression which requires /// zero initialization. /// /// Given /// \code /// void foo() { /// struct point { double x; double y; }; /// point pt[2] = { { 1.0, 2.0 } }; /// } /// \endcode /// initListExpr(has(cxxConstructExpr(requiresZeroInitialization())) /// will match the implicit array filler for pt[1]. AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) { return Node.requiresZeroInitialization(); } /// Matches the n'th parameter of a function or an ObjC method /// declaration or a block. /// /// Given /// \code /// class X { void f(int x) {} }; /// \endcode /// cxxMethodDecl(hasParameter(0, hasType(varDecl()))) /// matches f(int x) {} /// with hasParameter(...) /// matching int x /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// \endcode // /// the matcher objcMethodDecl(hasParameter(0, hasName("y"))) /// matches the declaration of method f with hasParameter /// matching y. 
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                           ObjCMethodDecl,
                                                           BlockDecl),
                           unsigned, N, internal::Matcher<ParmVarDecl>,
                           InnerMatcher) {
  // Bounds-check before indexing: out-of-range N simply fails to match.
  return (N < Node.parameters().size() &&
          InnerMatcher.matches(*Node.parameters()[N], Finder, Builder));
}

/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
///   void f(int i);
///   int y;
///   f(y);
/// \endcode
/// callExpr(
///   forEachArgumentWithParam(
///     declRefExpr(to(varDecl(hasName("y")))),
///     parmVarDecl(hasType(isInteger()))
/// ))
///   matches f(y);
/// with declRefExpr(...)
///   matching int y
/// and parmVarDecl(...)
///   matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<ParmVarDecl>, ParamMatcher) {
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                              .matches(Node, Finder, &Matches)
                          ? 1
                          : 0;
  // Use unsigned to match ArgIndex and the `unsigned N` parameter of
  // hasParameter(), avoiding a signed/unsigned conversion at each call.
  unsigned ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // Re-match the whole call node to resolve the callee's declaration and
      // test the parameter at the same position as the current argument.
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, ParamMatcher)))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, ParamMatcher))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
      }
    }
    ++ParamIndex;
  }
  *Builder = std::move(Result);
  return Matched;
}

/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
/// /// Given /// \code /// class X { void f(int x, int y, int z) {} }; /// \endcode /// cxxMethodDecl(hasAnyParameter(hasName("y"))) /// matches f(int x, int y, int z) {} /// with hasAnyParameter(...) /// matching int y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// \endcode // /// the matcher objcMethodDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of method f with hasParameter /// matching y. /// /// For blocks, given /// \code /// b = ^(int y) { printf("%d", y) }; /// \endcode /// /// the matcher blockDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of the block b with hasParameter /// matching y. AST_POLYMORPHIC_MATCHER_P(hasAnyParameter, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, ObjCMethodDecl, BlockDecl), internal::Matcher<ParmVarDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(), Node.param_end(), Finder, Builder); } /// Matches \c FunctionDecls and \c FunctionProtoTypes that have a /// specific parameter count. /// /// Given /// \code /// void f(int i) {} /// void g(int i, int j) {} /// void h(int i, int j); /// void j(int i); /// void k(int x, int y, int z, ...); /// \endcode /// functionDecl(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(3)) /// matches \c k AST_POLYMORPHIC_MATCHER_P(parameterCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType), unsigned, N) { return Node.getNumParams() == N; } /// Matches \c FunctionDecls that have a noreturn attribute. 
/// /// Given /// \code /// void nope(); /// [[noreturn]] void a(); /// __attribute__((noreturn)) void b(); /// struct c { [[noreturn]] c(); }; /// \endcode /// functionDecl(isNoReturn()) /// matches all of those except /// \code /// void nope(); /// \endcode AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); } /// Matches the return type of a function declaration. /// /// Given: /// \code /// class X { int f() { return 1; } }; /// \endcode /// cxxMethodDecl(returns(asString("int"))) /// matches int f() { return 1; } AST_MATCHER_P(FunctionDecl, returns, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getReturnType(), Finder, Builder); } /// Matches extern "C" function or variable declarations. /// /// Given: /// \code /// extern "C" void f() {} /// extern "C" { void g() {} } /// void h() {} /// extern "C" int x = 1; /// extern "C" int y = 2; /// int z = 3; /// \endcode /// functionDecl(isExternC()) /// matches the declaration of f and g, but not the declaration of h. /// varDecl(isExternC()) /// matches the declaration of x and y, but not the declaration of z. AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.isExternC(); } /// Matches variable/function declarations that have "static" storage /// class specifier ("static" keyword) written in the source. /// /// Given: /// \code /// static void f() {} /// static int i = 0; /// extern int j; /// int k; /// \endcode /// functionDecl(isStaticStorageClass()) /// matches the function declaration f. /// varDecl(isStaticStorageClass()) /// matches the variable declaration i. AST_POLYMORPHIC_MATCHER(isStaticStorageClass, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.getStorageClass() == SC_Static; } /// Matches deleted function declarations. 
///
/// Given:
/// \code
///   void Func();
///   void DeletedFunc() = delete;
/// \endcode
/// functionDecl(isDeleted())
///   matches the declaration of DeletedFunc, but not Func.
AST_MATCHER(FunctionDecl, isDeleted) {
  return Node.isDeleted();
}

/// Matches defaulted function declarations.
///
/// Given:
/// \code
///   class A { ~A(); };
///   class B { ~B() = default; };
/// \endcode
/// functionDecl(isDefaulted())
///   matches the declaration of ~B, but not ~A.
AST_MATCHER(FunctionDecl, isDefaulted) {
  return Node.isDefaulted();
}

/// Matches functions that have a dynamic exception specification.
///
/// Given:
/// \code
///   void f();
///   void g() noexcept;
///   void h() noexcept(true);
///   void i() noexcept(false);
///   void j() throw();
///   void k() throw(int);
///   void l() throw(...);
/// \endcode
/// functionDecl(hasDynamicExceptionSpec()) and
///   functionProtoType(hasDynamicExceptionSpec())
///   match the declarations of j, k, and l, but not f, g, h, or i.
AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  if (const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node))
    return FnTy->hasDynamicExceptionSpec();
  return false;
}

/// Matches functions that have a non-throwing exception specification.
///
/// Given:
/// \code
///   void f();
///   void g() noexcept;
///   void h() throw();
///   void i() throw(int);
///   void j() noexcept(false);
/// \endcode
/// functionDecl(isNoThrow()) and functionProtoType(isNoThrow())
///   match the declarations of g, and h, but not f, i or j.
AST_POLYMORPHIC_MATCHER(isNoThrow,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);

  // If the function does not have a prototype, then it is assumed to be a
  // throwing function (as it would if the function did not have any exception
  // specification).
  if (!FnTy)
    return false;

  // Assume the best for any unresolved exception specification.
  if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType()))
    return true;

  return FnTy->isNothrow();
}

/// Matches constexpr variable and function declarations,
///        and if constexpr.
///
/// Given:
/// \code
///   constexpr int foo = 42;
///   constexpr int bar();
///   void baz() { if constexpr(1 > 0) {} }
/// \endcode
/// varDecl(isConstexpr())
///   matches the declaration of foo.
/// functionDecl(isConstexpr())
///   matches the declaration of bar.
/// ifStmt(isConstexpr())
///   matches the if statement in baz.
AST_POLYMORPHIC_MATCHER(isConstexpr,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl,
                                                        FunctionDecl,
                                                        IfStmt)) {
  return Node.isConstexpr();
}

/// Matches the condition expression of an if statement, for loop,
/// switch statement or conditional operator.
///
/// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true))))
/// \code
///   if (true) {}
/// \endcode
AST_POLYMORPHIC_MATCHER_P(
    hasCondition,
    AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt,
                                    SwitchStmt, AbstractConditionalOperator),
    internal::Matcher<Expr>, InnerMatcher) {
  const Expr *const Condition = Node.getCond();
  return (Condition != nullptr &&
          InnerMatcher.matches(*Condition, Finder, Builder));
}

/// Matches the then-statement of an if statement.
///
/// Examples matches the if statement
///   (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true)))))
/// \code
///   if (false) true; else false;
/// \endcode
AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) {
  const Stmt *const Then = Node.getThen();
  return (Then != nullptr && InnerMatcher.matches(*Then, Finder, Builder));
}

/// Matches the else-statement of an if statement.
/// /// Examples matches the if statement /// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true))))) /// \code /// if (false) false; else true; /// \endcode AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Else = Node.getElse(); return (Else != nullptr && InnerMatcher.matches(*Else, Finder, Builder)); } /// Matches if a node equals a previously bound node. /// /// Matches a node if it equals the node previously bound to \p ID. /// /// Given /// \code /// class X { int a; int b; }; /// \endcode /// cxxRecordDecl( /// has(fieldDecl(hasName("a"), hasType(type().bind("t")))), /// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t")))))) /// matches the class \c X, as \c a and \c b have the same type. /// /// Note that when multiple matches are involved via \c forEach* matchers, /// \c equalsBoundNodes acts as a filter. /// For example: /// compoundStmt( /// forEachDescendant(varDecl().bind("d")), /// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d")))))) /// will trigger a match for each combination of variable declaration /// and reference to that variable declaration within a compound statement. AST_POLYMORPHIC_MATCHER_P(equalsBoundNode, AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type, QualType), std::string, ID) { // FIXME: Figure out whether it makes sense to allow this // on any other node types. // For *Loc it probably does not make sense, as those seem // unique. For NestedNameSepcifier it might make sense, as // those also have pointer identity, but I'm not sure whether // they're ever reused. internal::NotEqualsBoundNodePredicate Predicate; Predicate.ID = ID; Predicate.Node = ast_type_traits::DynTypedNode::create(Node); return Builder->removeBindings(Predicate); } /// Matches the condition variable statement in an if statement. /// /// Given /// \code /// if (A* a = GetAPointer()) {} /// \endcode /// hasConditionVariableStatement(...) /// matches 'A* a = GetAPointer()'. 
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
              internal::Matcher<DeclStmt>, InnerMatcher) {
  // Null when the condition is a plain expression rather than a declaration.
  const DeclStmt* const DeclarationStatement =
    Node.getConditionVariableDeclStmt();
  return DeclarationStatement != nullptr &&
         InnerMatcher.matches(*DeclarationStatement, Finder, Builder);
}

/// Matches the index expression of an array subscript expression.
///
/// Given
/// \code
///   int i[5];
///   void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpression(hasIndex(integerLiteral()))
///   matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr* Expression = Node.getIdx())
    return InnerMatcher.matches(*Expression, Finder, Builder);
  return false;
}

/// Matches the base expression of an array subscript expression.
///
/// Given
/// \code
///   int i[5];
///   void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpression(hasBase(implicitCastExpr(
///     hasSourceExpression(declRefExpr()))))
///   matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr* Expression = Node.getBase())
    return InnerMatcher.matches(*Expression, Finder, Builder);
  return false;
}

/// Matches a 'for', 'while', 'do while' statement or a function
/// definition that has a given body.
///
/// Given
/// \code
///   for (;;) {}
/// \endcode
/// hasBody(compoundStmt())
///   matches 'for (;;) {}'
/// with compoundStmt()
///   matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasBody,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
                                                          WhileStmt,
                                                          CXXForRangeStmt,
                                                          FunctionDecl),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // GetBodyMatcher dispatches to the right body accessor per node type.
  const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node);
  return (Statement != nullptr &&
          InnerMatcher.matches(*Statement, Finder, Builder));
}

/// Matches compound statements where at least one substatement matches
/// a given matcher. Also matches StmtExprs that have CompoundStmt as children.
///
/// Given
/// \code
///   { {}; 1+2; }
/// \endcode
/// hasAnySubstatement(compoundStmt())
///   matches '{ {}; 1+2; }'
/// with compoundStmt()
///   matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt,
                                                          StmtExpr),
                          internal::Matcher<Stmt>, InnerMatcher) {
  const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node);
  return CS && matchesFirstInPointerRange(InnerMatcher, CS->body_begin(),
                                          CS->body_end(), Finder, Builder);
}

/// Checks that a compound statement contains a specific number of
/// child statements.
///
/// Example: Given
/// \code
///   { for (;;) {} }
/// \endcode
/// compoundStmt(statementCountIs(0)))
///   matches '{}'
///   but does not match the outer compound statement.
AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
  return Node.size() == N;
}

/// Matches literals that are equal to the given value of type ValueT.
///
/// Given
/// \code
///   f('\0', false, 3.14, 42);
/// \endcode
/// characterLiteral(equals(0))
///   matches '\0'
/// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0))
///   match false
/// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2))
///   match 3.14
/// integerLiteral(equals(42))
///   matches 42
///
/// Note that you cannot directly match a negative numeric literal because the
/// minus sign is not part of the literal: It is a unary operator whose operand
/// is the positive numeric literal. Instead, you must use a unaryOperator()
/// matcher to match the minus sign:
///
/// unaryOperator(hasOperatorName("-"),
///               hasUnaryOperand(integerLiteral(equals(13))))
///
/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>,
///            Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
template <typename ValueT>
internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT>
equals(const ValueT &Value) {
  return internal::PolymorphicMatcherWithParam1<
    internal::ValueEqualsMatcher, ValueT>(Value);
}

// Overload for bool literals; disambiguated from the unsigned and double
// overloads by the trailing overload index.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(
                                       CharacterLiteral, CXXBoolLiteralExpr,
                                       IntegerLiteral),
                                   bool, Value, 0) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
    .matchesNode(Node);
}

AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(
                                       CharacterLiteral, CXXBoolLiteralExpr,
                                       IntegerLiteral),
                                   unsigned, Value, 1) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
    .matchesNode(Node);
}

AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(
                                       CharacterLiteral, CXXBoolLiteralExpr,
                                       FloatingLiteral, IntegerLiteral),
                                   double, Value, 2) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
    .matchesNode(Node);
}

/// Matches the operator Name of operator expressions (binary or
/// unary).
///
/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
/// \code
///   !(a || b)
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasOperatorName,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          UnaryOperator),
                          std::string, Name) {
  return Name == Node.getOpcodeStr(Node.getOpcode());
}

/// Matches all kinds of assignment operators.
/// /// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator())) /// \code /// if (a == b) /// a += b; /// \endcode /// /// Example 2: matches s1 = s2 /// (matcher = cxxOperatorCallExpr(isAssignmentOperator())) /// \code /// struct S { S& operator=(const S&); }; /// void x() { S s1, s2; s1 = s2; }) /// \endcode AST_POLYMORPHIC_MATCHER(isAssignmentOperator, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr)) { return Node.isAssignmentOp(); } /// Matches the left hand side of binary operator expressions. /// /// Example matches a (matcher = binaryOperator(hasLHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasLHS, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *LeftHandSide = Node.getLHS(); return (LeftHandSide != nullptr && InnerMatcher.matches(*LeftHandSide, Finder, Builder)); } /// Matches the right hand side of binary operator expressions. /// /// Example matches b (matcher = binaryOperator(hasRHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasRHS, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *RightHandSide = Node.getRHS(); return (RightHandSide != nullptr && InnerMatcher.matches(*RightHandSide, Finder, Builder)); } /// Matches if either the left hand side or the right hand side of a /// binary operator matches. inline internal::Matcher<BinaryOperator> hasEitherOperand( const internal::Matcher<Expr> &InnerMatcher) { return anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher)); } /// Matches if the operand of a unary operator matches. 
/// /// Example matches true (matcher = hasUnaryOperand( /// cxxBoolLiteral(equals(true)))) /// \code /// !true /// \endcode AST_MATCHER_P(UnaryOperator, hasUnaryOperand, internal::Matcher<Expr>, InnerMatcher) { const Expr * const Operand = Node.getSubExpr(); return (Operand != nullptr && InnerMatcher.matches(*Operand, Finder, Builder)); } /// Matches if the cast's source expression /// or opaque value's source expression matches the given matcher. /// /// Example 1: matches "a string" /// (matcher = castExpr(hasSourceExpression(cxxConstructExpr()))) /// \code /// class URL { URL(string); }; /// URL url = "a string"; /// \endcode /// /// Example 2: matches 'b' (matcher = /// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr()))) /// \code /// int a = b ?: 1; /// \endcode AST_POLYMORPHIC_MATCHER_P(hasSourceExpression, AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr, OpaqueValueExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *const SubExpression = internal::GetSourceExpressionMatcher<NodeType>::get(Node); return (SubExpression != nullptr && InnerMatcher.matches(*SubExpression, Finder, Builder)); } /// Matches casts that has a given cast kind. /// /// Example: matches the implicit cast around \c 0 /// (matcher = castExpr(hasCastKind(CK_NullToPointer))) /// \code /// int *p = 0; /// \endcode /// /// If the matcher is use from clang-query, CastKind parameter /// should be passed as a quoted string. e.g., ofKind("CK_NullToPointer"). AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) { return Node.getCastKind() == Kind; } /// Matches casts whose destination type matches a given matcher. /// /// (Note: Clang's AST refers to other conversions as "casts" too, and calls /// actual casts "explicit" casts.) 
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Use the type as written in source, not the semantic (canonical) type.
  const QualType NodeType = Node.getTypeAsWritten();
  return InnerMatcher.matches(NodeType, Finder, Builder);
}

/// Matches implicit casts whose destination type matches a given
/// matcher.
///
/// FIXME: Unit test this matcher
AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  return InnerMatcher.matches(Node.getType(), Finder, Builder);
}

/// Matches RecordDecl object that are spelled with "struct."
///
/// Example matches S, but not C or U.
/// \code
///   struct S {};
///   class C {};
///   union U {};
/// \endcode
AST_MATCHER(RecordDecl, isStruct) {
  return Node.isStruct();
}

/// Matches RecordDecl object that are spelled with "union."
///
/// Example matches U, but not C or S.
/// \code
///   struct S {};
///   class C {};
///   union U {};
/// \endcode
AST_MATCHER(RecordDecl, isUnion) {
  return Node.isUnion();
}

/// Matches RecordDecl object that are spelled with "class."
///
/// Example matches C, but not S or U.
/// \code
///   struct S {};
///   class C {};
///   union U {};
/// \endcode
AST_MATCHER(RecordDecl, isClass) {
  return Node.isClass();
}

/// Matches the true branch expression of a conditional operator.
///
/// Example 1 (conditional ternary operator): matches a
/// \code
///   condition ? a : b
/// \endcode
///
/// Example 2 (conditional binary operator): matches opaqueValueExpr(condition)
/// \code
///   condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Expression = Node.getTrueExpr();
  return (Expression != nullptr &&
          InnerMatcher.matches(*Expression, Finder, Builder));
}

/// Matches the false branch expression of a conditional operator
/// (binary or ternary).
///
/// Example matches b
/// \code
///   condition ? a : b
///   condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Expression = Node.getFalseExpr();
  return (Expression != nullptr &&
          InnerMatcher.matches(*Expression, Finder, Builder));
}

/// Matches if a declaration has a body attached.
///
/// Example matches A, va, fa
/// \code
///   class A {};
///   class B;  // Doesn't match, as it has no body.
///   int va;
///   extern int vb;  // Doesn't match, as it doesn't define the variable.
///   void fa() {}
///   void fb();  // Doesn't match, as it has no body.
///   @interface X
///   - (void)ma; // Doesn't match, interface is declaration.
///   @end
///   @implementation X
///   - (void)ma {}
///   @end
/// \endcode
///
/// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>,
///   Matcher<ObjCMethodDecl>
AST_POLYMORPHIC_MATCHER(isDefinition,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl,
                                                        ObjCMethodDecl,
                                                        FunctionDecl)) {
  return Node.isThisDeclarationADefinition();
}

/// Matches if a function declaration is variadic.
///
/// Example matches f, but not g or h. The function i will not match, even when
/// compiled in C mode.
/// \code
///   void f(...);
///   void g(int);
///   template <typename... Ts> void h(Ts...);
///   void i();
/// \endcode
AST_MATCHER(FunctionDecl, isVariadic) {
  return Node.isVariadic();
}

/// Matches the class declaration that the given method declaration
/// belongs to.
///
/// FIXME: Generalize this for other kinds of declarations.
/// FIXME: What other kind of declarations would we need to generalize
/// this to?
/// /// Example matches A() in the last line /// (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl( /// ofClass(hasName("A")))))) /// \code /// class A { /// public: /// A(); /// }; /// A a = A(); /// \endcode AST_MATCHER_P(CXXMethodDecl, ofClass, internal::Matcher<CXXRecordDecl>, InnerMatcher) { const CXXRecordDecl *Parent = Node.getParent(); return (Parent != nullptr && InnerMatcher.matches(*Parent, Finder, Builder)); } /// Matches each method overridden by the given method. This matcher may /// produce multiple matches. /// /// Given /// \code /// class A { virtual void f(); }; /// class B : public A { void f(); }; /// class C : public B { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note /// that B::f is not overridden by C::f). /// /// The check can produce multiple matches in case of multiple inheritance, e.g. /// \code /// class A1 { virtual void f(); }; /// class A2 { virtual void f(); }; /// class C : public A1, public A2 { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and /// once with "b" binding "A2::f" and "d" binding "C::f". AST_MATCHER_P(CXXMethodDecl, forEachOverridden, internal::Matcher<CXXMethodDecl>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *Overridden : Node.overridden_methods()) { BoundNodesTreeBuilder OverriddenBuilder(*Builder); const bool OverriddenMatched = InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder); if (OverriddenMatched) { Matched = true; Result.addMatch(OverriddenBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches if the given method declaration is virtual. 
/// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// \endcode /// matches A::x AST_MATCHER(CXXMethodDecl, isVirtual) { return Node.isVirtual(); } /// Matches if the given method declaration has an explicit "virtual". /// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// void x(); /// }; /// \endcode /// matches A::x but not B::x AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) { return Node.isVirtualAsWritten(); } /// Matches if the given method or class declaration is final. /// /// Given: /// \code /// class A final {}; /// /// struct B { /// virtual void f(); /// }; /// /// struct C : B { /// void f() final; /// }; /// \endcode /// matches A and C::f, but not B, C, or B::f AST_POLYMORPHIC_MATCHER(isFinal, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, CXXMethodDecl)) { return Node.template hasAttr<FinalAttr>(); } /// Matches if the given method declaration is pure. /// /// Given /// \code /// class A { /// public: /// virtual void x() = 0; /// }; /// \endcode /// matches A::x AST_MATCHER(CXXMethodDecl, isPure) { return Node.isPure(); } /// Matches if the given method declaration is const. /// /// Given /// \code /// struct A { /// void foo() const; /// void bar(); /// }; /// \endcode /// /// cxxMethodDecl(isConst()) matches A::foo() but not A::bar() AST_MATCHER(CXXMethodDecl, isConst) { return Node.isConst(); } /// Matches if the given method declaration declares a copy assignment /// operator. /// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not /// the second one. AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) { return Node.isCopyAssignmentOperator(); } /// Matches if the given method declaration declares a move assignment /// operator. 
/// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not /// the first one. AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) { return Node.isMoveAssignmentOperator(); } /// Matches if the given method declaration overrides another method. /// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// virtual void x(); /// }; /// \endcode /// matches B::x AST_MATCHER(CXXMethodDecl, isOverride) { return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>(); } /// Matches method declarations that are user-provided. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &) = default; // #2 /// S(S &&) = delete; // #3 /// }; /// \endcode /// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3. AST_MATCHER(CXXMethodDecl, isUserProvided) { return Node.isUserProvided(); } /// Matches member expressions that are called with '->' as opposed /// to '.'. /// /// Member calls on the implicit this pointer match as called with '->'. /// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// template <class T> void f() { this->f<T>(); f<T>(); } /// int a; /// static int b; /// }; /// template <class T> /// class Z { /// void x() { this->m; } /// }; /// \endcode /// memberExpr(isArrow()) /// matches this->x, x, y.x, a, this->b /// cxxDependentScopeMemberExpr(isArrow()) /// matches this->m /// unresolvedMemberExpr(isArrow()) /// matches this->f<T>, f<T> AST_POLYMORPHIC_MATCHER( isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr, CXXDependentScopeMemberExpr)) { return Node.isArrow(); } /// Matches QualType nodes that are of integer type. 
/// /// Given /// \code /// void a(int); /// void b(long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isInteger()))) /// matches "a(int)", "b(long)", but not "c(double)". AST_MATCHER(QualType, isInteger) { return Node->isIntegerType(); } /// Matches QualType nodes that are of unsigned integer type. /// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isUnsignedInteger()))) /// matches "b(unsigned long)", but not "a(int)" and "c(double)". AST_MATCHER(QualType, isUnsignedInteger) { return Node->isUnsignedIntegerType(); } /// Matches QualType nodes that are of signed integer type. /// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isSignedInteger()))) /// matches "a(int)", but not "b(unsigned long)" and "c(double)". AST_MATCHER(QualType, isSignedInteger) { return Node->isSignedIntegerType(); } /// Matches QualType nodes that are of character type. /// /// Given /// \code /// void a(char); /// void b(wchar_t); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isAnyCharacter()))) /// matches "a(char)", "b(wchar_t)", but not "c(double)". AST_MATCHER(QualType, isAnyCharacter) { return Node->isAnyCharacterType(); } /// Matches QualType nodes that are of any pointer type; this includes /// the Objective-C object pointer type, which is different despite being /// syntactically similar. /// /// Given /// \code /// int *i = nullptr; /// /// @interface Foo /// @end /// Foo *f; /// /// int j; /// \endcode /// varDecl(hasType(isAnyPointer())) /// matches "int *i" and "Foo *f", but not "int j". AST_MATCHER(QualType, isAnyPointer) { return Node->isAnyPointerType(); } /// Matches QualType nodes that are const-qualified, i.e., that /// include "top-level" const. 
/// /// Given /// \code /// void a(int); /// void b(int const); /// void c(const int); /// void d(const int*); /// void e(int const) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isConstQualified()))) /// matches "void b(int const)", "void c(const int)" and /// "void e(int const) {}". It does not match d as there /// is no top-level const on the parameter type "const int *". AST_MATCHER(QualType, isConstQualified) { return Node.isConstQualified(); } /// Matches QualType nodes that are volatile-qualified, i.e., that /// include "top-level" volatile. /// /// Given /// \code /// void a(int); /// void b(int volatile); /// void c(volatile int); /// void d(volatile int*); /// void e(int volatile) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isVolatileQualified()))) /// matches "void b(int volatile)", "void c(volatile int)" and /// "void e(int volatile) {}". It does not match d as there /// is no top-level volatile on the parameter type "volatile int *". AST_MATCHER(QualType, isVolatileQualified) { return Node.isVolatileQualified(); } /// Matches QualType nodes that have local CV-qualifiers attached to /// the node, not hidden within a typedef. /// /// Given /// \code /// typedef const int const_int; /// const_int i; /// int *const j; /// int *volatile k; /// int m; /// \endcode /// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k. /// \c i is const-qualified but the qualifier is not local. AST_MATCHER(QualType, hasLocalQualifiers) { return Node.hasLocalQualifiers(); } /// Matches a member expression where the member is matched by a /// given matcher. /// /// Given /// \code /// struct { int first, second; } first, second; /// int i(second.first); /// int j(first.second); /// \endcode /// memberExpr(member(hasName("first"))) /// matches second.first /// but not first.second (because the member name there is "second"). 
AST_MATCHER_P(MemberExpr, member, internal::Matcher<ValueDecl>, InnerMatcher) {
  return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}

/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
///
/// Given
/// \code
///   struct X {
///     int m;
///     int f(X x) { x.m; return m; }
///   };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
///   matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
///     cxxRecordDecl(hasName("X"))))))
///   matches `m` (aka. `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
    hasObjectExpression,
    AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                    CXXDependentScopeMemberExpr),
    internal::Matcher<Expr>, InnerMatcher) {
  // Implicit accesses of dependent/unresolved members have no written object
  // expression to hand to the inner matcher, so they cannot match here.
  if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}

/// Matches any using shadow declaration.
///
/// Given
/// \code
///   namespace X { void b(); }
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b")))
///   matches \code using X::b \endcode
AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
                                    Node.shadow_end(), Finder, Builder);
}

/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
/// /// Given /// \code /// namespace X { int a; void b(); } /// using X::a; /// using X::b; /// \endcode /// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl()))) /// matches \code using X::b \endcode /// but not \code using X::a \endcode AST_MATCHER_P(UsingShadowDecl, hasTargetDecl, internal::Matcher<NamedDecl>, InnerMatcher) { return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder); } /// Matches template instantiations of function, class, or static /// member variable template instantiations. /// /// Given /// \code /// template <typename T> class X {}; class A {}; X<A> x; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; template class X<A>; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; extern template class X<A>; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// matches the template instantiation of X<A>. /// /// But given /// \code /// template <typename T> class X {}; class A {}; /// template <> class X<A> {}; X<A> x; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// does not match, as X<A> is an explicit template specialization. /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isTemplateInstantiation, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ImplicitInstantiation || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDefinition || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDeclaration); } /// Matches declarations that are template instantiations or are inside /// template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { T i; } /// A(0); /// A(0U); /// \endcode /// functionDecl(isInstantiated()) /// matches 'A(int) {...};' and 'A(unsigned) {...}'. 
AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) { auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))); return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation))); } /// Matches statements inside of a template instantiation. /// /// Given /// \code /// int j; /// template<typename T> void A(T t) { T i; j += 42;} /// A(0); /// A(0U); /// \endcode /// declStmt(isInTemplateInstantiation()) /// matches 'int i;' and 'unsigned i'. /// unless(stmt(isInTemplateInstantiation())) /// will NOT match j += 42; as it's shared between the template definition and /// instantiation. AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) { return stmt( hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))))); } /// Matches explicit template specializations of function, class, or /// static member variable template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { } /// template<> void A(int N) { } /// \endcode /// functionDecl(isExplicitTemplateSpecialization()) /// matches the specialization A<int>(). /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization); } /// Matches \c TypeLocs for which the given inner /// QualType-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc, internal::Matcher<QualType>, InnerMatcher, 0) { return internal::BindableMatcher<TypeLoc>( new internal::TypeLocTypeMatcher(InnerMatcher)); } /// Matches type \c bool. 
/// /// Given /// \code /// struct S { bool func(); }; /// \endcode /// functionDecl(returns(booleanType())) /// matches "bool func();" AST_MATCHER(Type, booleanType) { return Node.isBooleanType(); } /// Matches type \c void. /// /// Given /// \code /// struct S { void func(); }; /// \endcode /// functionDecl(returns(voidType())) /// matches "void func();" AST_MATCHER(Type, voidType) { return Node.isVoidType(); } template <typename NodeType> using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>; /// Matches builtin Types. /// /// Given /// \code /// struct A {}; /// A a; /// int b; /// float c; /// bool d; /// \endcode /// builtinType() /// matches "int b", "float c" and "bool d" extern const AstTypeMatcher<BuiltinType> builtinType; /// Matches all kinds of arrays. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[4]; /// void f() { int c[a[0]]; } /// \endcode /// arrayType() /// matches "int a[]", "int b[4]" and "int c[a[0]]"; extern const AstTypeMatcher<ArrayType> arrayType; /// Matches C99 complex types. /// /// Given /// \code /// _Complex float f; /// \endcode /// complexType() /// matches "_Complex float f" extern const AstTypeMatcher<ComplexType> complexType; /// Matches any real floating-point type (float, double, long double). /// /// Given /// \code /// int i; /// float f; /// \endcode /// realFloatingPointType() /// matches "float f" but not "int i" AST_MATCHER(Type, realFloatingPointType) { return Node.isRealFloatingType(); } /// Matches arrays and C99 complex types that have a specific element /// type. /// /// Given /// \code /// struct A {}; /// A a[7]; /// int b[7]; /// \endcode /// arrayType(hasElementType(builtinType())) /// matches "int b[7]" /// /// Usable as: Matcher<ArrayType>, Matcher<ComplexType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement, AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType, ComplexType)); /// Matches C arrays with a specified constant size. 
/// /// Given /// \code /// void() { /// int a[2]; /// int b[] = { 2, 3 }; /// int c[b[0]]; /// } /// \endcode /// constantArrayType() /// matches "int a[2]" extern const AstTypeMatcher<ConstantArrayType> constantArrayType; /// Matches nodes that have the specified size. /// /// Given /// \code /// int a[42]; /// int b[2 * 21]; /// int c[41], d[43]; /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// char *w = "a"; /// \endcode /// constantArrayType(hasSize(42)) /// matches "int a[42]" and "int b[2 * 21]" /// stringLiteral(hasSize(4)) /// matches "abcd", L"abcd" AST_POLYMORPHIC_MATCHER_P(hasSize, AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType, StringLiteral), unsigned, N) { return internal::HasSizeMatcher<NodeType>::hasSize(Node, N); } /// Matches C++ arrays whose size is a value-dependent expression. /// /// Given /// \code /// template<typename T, int Size> /// class array { /// T data[Size]; /// }; /// \endcode /// dependentSizedArrayType /// matches "T data[Size]" extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType; /// Matches C arrays with unspecified size. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[42]; /// void f(int c[]) { int d[a[0]]; }; /// \endcode /// incompleteArrayType() /// matches "int a[]" and "int c[]" extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType; /// Matches C arrays with a specified size that is not an /// integer-constant-expression. /// /// Given /// \code /// void f() { /// int a[] = { 2, 3 } /// int b[42]; /// int c[a[0]]; /// } /// \endcode /// variableArrayType() /// matches "int c[a[0]]" extern const AstTypeMatcher<VariableArrayType> variableArrayType; /// Matches \c VariableArrayType nodes that have a specific size /// expression. 
/// /// Given /// \code /// void f(int b) { /// int a[b]; /// } /// \endcode /// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to( /// varDecl(hasName("b"))))))) /// matches "int a[b]" AST_MATCHER_P(VariableArrayType, hasSizeExpr, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder); } /// Matches atomic types. /// /// Given /// \code /// _Atomic(int) i; /// \endcode /// atomicType() /// matches "_Atomic(int) i" extern const AstTypeMatcher<AtomicType> atomicType; /// Matches atomic types with a specific value type. /// /// Given /// \code /// _Atomic(int) i; /// _Atomic(float) f; /// \endcode /// atomicType(hasValueType(isInteger())) /// matches "_Atomic(int) i" /// /// Usable as: Matcher<AtomicType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue, AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType)); /// Matches types nodes representing C++11 auto types. /// /// Given: /// \code /// auto n = 4; /// int v[] = { 2, 3 } /// for (auto i : v) { } /// \endcode /// autoType() /// matches "auto n" and "auto i" extern const AstTypeMatcher<AutoType> autoType; /// Matches types nodes representing C++11 decltype(<expr>) types. /// /// Given: /// \code /// short i = 1; /// int j = 42; /// decltype(i + j) result = i + j; /// \endcode /// decltypeType() /// matches "decltype(i + j)" extern const AstTypeMatcher<DecltypeType> decltypeType; /// Matches \c AutoType nodes where the deduced type is a specific type. /// /// Note: There is no \c TypeLoc for the deduced type and thus no /// \c getDeducedLoc() matcher. /// /// Given /// \code /// auto a = 1; /// auto b = 2.0; /// \endcode /// autoType(hasDeducedType(isInteger())) /// matches "auto a" /// /// Usable as: Matcher<AutoType> AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType, AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType)); /// Matches \c DecltypeType nodes to find out the underlying type. 
/// /// Given /// \code /// decltype(1) a = 1; /// decltype(2.0) b = 2.0; /// \endcode /// decltypeType(hasUnderlyingType(isInteger())) /// matches the type of "a" /// /// Usable as: Matcher<DecltypeType> AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType, AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType)); /// Matches \c FunctionType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionType() /// matches "int (*f)(int)" and the type of "g". extern const AstTypeMatcher<FunctionType> functionType; /// Matches \c FunctionProtoType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionProtoType() /// matches "int (*f)(int)" and the type of "g" in C++ mode. /// In C mode, "g" is not matched because it does not contain a prototype. extern const AstTypeMatcher<FunctionProtoType> functionProtoType; /// Matches \c ParenType nodes. /// /// Given /// \code /// int (*ptr_to_array)[4]; /// int *array_of_ptrs[4]; /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not /// \c array_of_ptrs. extern const AstTypeMatcher<ParenType> parenType; /// Matches \c ParenType nodes where the inner type is a specific type. /// /// Given /// \code /// int (*ptr_to_array)[4]; /// int (*ptr_to_func)(int); /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches /// \c ptr_to_func but not \c ptr_to_array. /// /// Usable as: Matcher<ParenType> AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType, AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType)); /// Matches block pointer types, i.e. types syntactically represented as /// "void (^)(int)". /// /// The \c pointee is always required to be a \c FunctionType. extern const AstTypeMatcher<BlockPointerType> blockPointerType; /// Matches member pointer types. 
/// Given /// \code /// struct A { int i; } /// A::* ptr = A::i; /// \endcode /// memberPointerType() /// matches "A::* ptr" extern const AstTypeMatcher<MemberPointerType> memberPointerType; /// Matches pointer types, but does not match Objective-C object pointer /// types. /// /// Given /// \code /// int *a; /// int &b = *a; /// int c = 5; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "int *a", but does not match "Foo *f". extern const AstTypeMatcher<PointerType> pointerType; /// Matches an Objective-C object pointer type, which is different from /// a pointer type, despite being syntactically similar. /// /// Given /// \code /// int *a; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "Foo *f", but does not match "int *a". extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType; /// Matches both lvalue and rvalue reference types. /// /// Given /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f. extern const AstTypeMatcher<ReferenceType> referenceType; /// Matches lvalue reference types. /// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is /// matched since the type is deduced as int& by reference collapsing rules. extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType; /// Matches rvalue reference types. /// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not /// matched as it is deduced to int& by reference collapsing rules. 
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType;

/// Narrows PointerType (and similar) matchers to those where the
/// \c pointee matches a given matcher.
///
/// Given
/// \code
///   int *a;
///   int const *b;
///   float const *f;
/// \endcode
/// pointerType(pointee(isConstQualified(), isInteger()))
///   matches "int const *b"
///
/// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>,
///   Matcher<PointerType>, Matcher<ReferenceType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(
    pointee, getPointee,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType,
                                    PointerType, ReferenceType));

/// Matches typedef types.
///
/// Given
/// \code
///   typedef int X;
/// \endcode
/// typedefType()
///   matches "typedef int X"
extern const AstTypeMatcher<TypedefType> typedefType;

/// Matches enum types.
///
/// Given
/// \code
///   enum C { Green };
///   enum class S { Red };
///
///   C c;
///   S s;
/// \endcode
///
/// \c enumType() matches the type of the variable declarations of both \c c and
/// \c s.
extern const AstTypeMatcher<EnumType> enumType;

/// Matches template specialization types.
///
/// Given
/// \code
///   template <typename T>
///   class C { };
///
///   template class C<int>;  // A
///   C<char> var;            // B
/// \endcode
///
/// \c templateSpecializationType() matches the type of the explicit
/// instantiation in \c A and the type of the variable declaration in \c B.
extern const AstTypeMatcher<TemplateSpecializationType>
    templateSpecializationType;

/// Matches types nodes representing unary type transformations.
///
/// Given:
/// \code
///   typedef __underlying_type(T) type;
/// \endcode
/// unaryTransformType()
///   matches "__underlying_type(T)"
extern const AstTypeMatcher<UnaryTransformType> unaryTransformType;

/// Matches record types (e.g. structs, classes).
/// /// Given /// \code /// class C {}; /// struct S {}; /// /// C c; /// S s; /// \endcode /// /// \c recordType() matches the type of the variable declarations of both \c c /// and \c s. extern const AstTypeMatcher<RecordType> recordType; /// Matches tag types (record and enum types). /// /// Given /// \code /// enum E {}; /// class C {}; /// /// E e; /// C c; /// \endcode /// /// \c tagType() matches the type of the variable declarations of both \c e /// and \c c. extern const AstTypeMatcher<TagType> tagType; /// Matches types specified with an elaborated type keyword or with a /// qualified name. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// class C {}; /// /// class C c; /// N::M::D d; /// \endcode /// /// \c elaboratedType() matches the type of the variable declarations of both /// \c c and \c d. extern const AstTypeMatcher<ElaboratedType> elaboratedType; /// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier, /// matches \c InnerMatcher if the qualifier exists. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// N::M::D d; /// \endcode /// /// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N")))) /// matches the type of the variable declaration of \c d. AST_MATCHER_P(ElaboratedType, hasQualifier, internal::Matcher<NestedNameSpecifier>, InnerMatcher) { if (const NestedNameSpecifier *Qualifier = Node.getQualifier()) return InnerMatcher.matches(*Qualifier, Finder, Builder); return false; } /// Matches ElaboratedTypes whose named type matches \c InnerMatcher. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// N::M::D d; /// \endcode /// /// \c elaboratedType(namesType(recordType( /// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable /// declaration of \c d. 
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getNamedType(), Finder, Builder); } /// Matches types that represent the result of substituting a type for a /// template type parameter. /// /// Given /// \code /// template <typename T> /// void F(T t) { /// int i = 1 + t; /// } /// \endcode /// /// \c substTemplateTypeParmType() matches the type of 't' but not '1' extern const AstTypeMatcher<SubstTemplateTypeParmType> substTemplateTypeParmType; /// Matches template type parameter substitutions that have a replacement /// type that matches the provided matcher. /// /// Given /// \code /// template <typename T> /// double F(T t); /// int i; /// double j = F(i); /// \endcode /// /// \c substTemplateTypeParmType(hasReplacementType(type())) matches int AST_TYPE_TRAVERSE_MATCHER( hasReplacementType, getReplacementType, AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType)); /// Matches template type parameter types. /// /// Example matches T, but not int. /// (matcher = templateTypeParmType()) /// \code /// template <typename T> void f(int i); /// \endcode extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType; /// Matches injected class name types. /// /// Example matches S s, but not S<T> s. /// (matcher = parmVarDecl(hasType(injectedClassNameType()))) /// \code /// template <typename T> struct S { /// void f(S s); /// void g(S<T> s); /// }; /// \endcode extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType; /// Matches decayed type /// Example matches i[] in declaration of f. /// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType()))))) /// Example matches i[1]. 
/// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType()))))) /// \code /// void f(int i[]) { /// i[1] = 0; /// } /// \endcode extern const AstTypeMatcher<DecayedType> decayedType; /// Matches the decayed type, whos decayed type matches \c InnerMatcher AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>, InnerType) { return InnerType.matches(Node.getDecayedType(), Finder, Builder); } /// Matches declarations whose declaration context, interpreted as a /// Decl, matches \c InnerMatcher. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// \endcode /// /// \c cxxRcordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the /// declaration of \c class \c D. AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) { const DeclContext *DC = Node.getDeclContext(); if (!DC) return false; return InnerMatcher.matches(*Decl::castFromDeclContext(DC), Finder, Builder); } /// Matches nested name specifiers. /// /// Given /// \code /// namespace ns { /// struct A { static void f(); }; /// void A::f() {} /// void g() { A::f(); } /// } /// ns::A a; /// \endcode /// nestedNameSpecifier() /// matches "ns::" and both "A::" extern const internal::VariadicAllOfMatcher<NestedNameSpecifier> nestedNameSpecifier; /// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc. extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc> nestedNameSpecifierLoc; /// Matches \c NestedNameSpecifierLocs for which the given inner /// NestedNameSpecifier-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD( internal::BindableMatcher<NestedNameSpecifierLoc>, loc, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) { return internal::BindableMatcher<NestedNameSpecifierLoc>( new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>( InnerMatcher)); } /// Matches nested name specifiers that specify a type matching the /// given \c QualType matcher without qualifiers. 
/// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(specifiesType( /// hasDeclaration(cxxRecordDecl(hasName("A"))) /// )) /// matches "A::" AST_MATCHER_P(NestedNameSpecifier, specifiesType, internal::Matcher<QualType>, InnerMatcher) { if (!Node.getAsType()) return false; return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder); } /// Matches nested name specifier locs that specify a type matching the /// given \c TypeLoc. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type( /// hasDeclaration(cxxRecordDecl(hasName("A"))))))) /// matches "A::" AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc, internal::Matcher<TypeLoc>, InnerMatcher) { return Node && Node.getNestedNameSpecifier()->getAsType() && InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifier. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 0) { const NestedNameSpecifier *NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(*NextNode, Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifierLoc. 
/// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A"))))) /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix, internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher, 1) { NestedNameSpecifierLoc NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(NextNode, Finder, Builder); } /// Matches nested name specifiers that specify a namespace matching the /// given namespace matcher. /// /// Given /// \code /// namespace ns { struct A {}; } /// ns::A a; /// \endcode /// nestedNameSpecifier(specifiesNamespace(hasName("ns"))) /// matches "ns::" AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace, internal::Matcher<NamespaceDecl>, InnerMatcher) { if (!Node.getAsNamespace()) return false; return InnerMatcher.matches(*Node.getAsNamespace(), Finder, Builder); } /// Overloads for the \c equalsNode matcher. /// FIXME: Implement for other node types. /// @{ /// Matches if a node equals another node. /// /// \c Decl has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Stmt has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Type has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) { return &Node == Other; } /// @} /// Matches each case or default statement belonging to the given switch /// statement. This matcher may produce multiple matches. 
/// /// Given /// \code /// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } } /// \endcode /// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s") /// matches four times, with "c" binding each of "case 1:", "case 2:", /// "case 3:" and "case 4:", and "s" respectively binding "switch (1)", /// "switch (1)", "switch (2)" and "switch (2)". AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>, InnerMatcher) { BoundNodesTreeBuilder Result; // FIXME: getSwitchCaseList() does not necessarily guarantee a stable // iteration order. We should use the more general iterating matchers once // they are capable of expressing this matcher (for example, it should ignore // case statements belonging to nested switch statements). bool Matched = false; for (const SwitchCase *SC = Node.getSwitchCaseList(); SC; SC = SC->getNextSwitchCase()) { BoundNodesTreeBuilder CaseBuilder(*Builder); bool CaseMatched = InnerMatcher.matches(*SC, Finder, &CaseBuilder); if (CaseMatched) { Matched = true; Result.addMatch(CaseBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches each constructor initializer in a constructor definition. /// /// Given /// \code /// class A { A() : i(42), j(42) {} int i; int j; }; /// \endcode /// cxxConstructorDecl(forEachConstructorInitializer( /// forField(decl().bind("x")) /// )) /// will trigger two matches, binding for 'i' and 'j' respectively. AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *I : Node.inits()) { BoundNodesTreeBuilder InitBuilder(*Builder); if (InnerMatcher.matches(*I, Finder, &InitBuilder)) { Matched = true; Result.addMatch(InitBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches constructor declarations that are copy constructors. 
/// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3. AST_MATCHER(CXXConstructorDecl, isCopyConstructor) { return Node.isCopyConstructor(); } /// Matches constructor declarations that are move constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2. AST_MATCHER(CXXConstructorDecl, isMoveConstructor) { return Node.isMoveConstructor(); } /// Matches constructor declarations that are default constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3. AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) { return Node.isDefaultConstructor(); } /// Matches constructors that delegate to another constructor. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(int) {} // #2 /// S(S &&) : S() {} // #3 /// }; /// S::S() : S(0) {} // #4 /// \endcode /// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not /// #1 or #2. AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) { return Node.isDelegatingConstructor(); } /// Matches constructor, conversion function, and deduction guide declarations /// that have an explicit specifier if this explicit specifier is resolved to /// true. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9. 
/// cxxConversionDecl(isExplicit()) will match #4, but not #3. /// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5. AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES( CXXConstructorDecl, CXXConversionDecl, CXXDeductionGuideDecl)) { return Node.isExplicit(); } /// Matches the expression in an explicit specifier if present in the given /// declaration. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2. /// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4. /// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6. AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>, InnerMatcher) { ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node); if (!ES.getExpr()) return false; return InnerMatcher.matches(*ES.getExpr(), Finder, Builder); } /// Matches function and namespace declarations that are marked with /// the inline keyword. /// /// Given /// \code /// inline void f(); /// void g(); /// namespace n { /// inline namespace m {} /// } /// \endcode /// functionDecl(isInline()) will match ::f(). /// namespaceDecl(isInline()) will match n::m. AST_POLYMORPHIC_MATCHER(isInline, AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl, FunctionDecl)) { // This is required because the spelling of the function used to determine // whether inline is specified or not differs between the polymorphic types. 
if (const auto *FD = dyn_cast<FunctionDecl>(&Node)) return FD->isInlineSpecified(); else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node)) return NSD->isInline(); llvm_unreachable("Not a valid polymorphic type"); } /// Matches anonymous namespace declarations. /// /// Given /// \code /// namespace n { /// namespace {} // #1 /// } /// \endcode /// namespaceDecl(isAnonymous()) will match #1 but not ::n. AST_MATCHER(NamespaceDecl, isAnonymous) { return Node.isAnonymousNamespace(); } /// Matches declarations in the namespace `std`, but not in nested namespaces. /// /// Given /// \code /// class vector {}; /// namespace foo { /// class vector {}; /// namespace std { /// class vector {}; /// } /// } /// namespace std { /// inline namespace __1 { /// class vector {}; // #1 /// namespace experimental { /// class vector {}; /// } /// } /// } /// \endcode /// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1. AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); } /// If the given case statement does not use the GNU case range /// extension, matches the constant given in the statement. /// /// Given /// \code /// switch (1) { case 1: case 1+1: case 3 ... 4: ; } /// \endcode /// caseStmt(hasCaseConstant(integerLiteral())) /// matches "case 1:" AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>, InnerMatcher) { if (Node.getRHS()) return false; return InnerMatcher.matches(*Node.getLHS(), Finder, Builder); } /// Matches declaration that has a given attribute. /// /// Given /// \code /// __attribute__((device)) void f() { ... } /// \endcode /// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of /// f. If the matcher is used from clang-query, attr::Kind parameter should be /// passed as a quoted string. e.g., hasAttr("attr::CUDADevice"). 
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) { for (const auto *Attr : Node.attrs()) { if (Attr->getKind() == AttrKind) return true; } return false; } /// Matches the return value expression of a return statement /// /// Given /// \code /// return a + b; /// \endcode /// hasReturnValue(binaryOperator()) /// matches 'return a + b' /// with binaryOperator() /// matching 'a + b' AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>, InnerMatcher) { if (const auto *RetValue = Node.getRetValue()) return InnerMatcher.matches(*RetValue, Finder, Builder); return false; } /// Matches CUDA kernel call expression. /// /// Example matches, /// \code /// kernel<<<i,j>>>(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr> cudaKernelCallExpr; /// Matches expressions that resolve to a null pointer constant, such as /// GNU's __null, C++11's nullptr, or C's NULL macro. /// /// Given: /// \code /// void *v1 = NULL; /// void *v2 = nullptr; /// void *v3 = __null; // GNU extension /// char *cp = (char *)0; /// int *ip = 0; /// int i = 0; /// \endcode /// expr(nullPointerConstant()) /// matches the initializer for v1, v2, v3, cp, and ip. Does not match the /// initializer for i. 
AST_MATCHER_FUNCTION(internal::Matcher<Expr>, nullPointerConstant) { return anyOf( gnuNullExpr(), cxxNullPtrLiteralExpr(), integerLiteral(equals(0), hasParent(expr(hasType(pointerType()))))); } /// Matches declaration of the function the statement belongs to /// /// Given: /// \code /// F& operator=(const F& o) { /// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; }); /// return *this; /// } /// \endcode /// returnStmt(forFunction(hasName("operator="))) /// matches 'return *this' /// but does not match 'return v > 0' AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>, InnerMatcher) { const auto &Parents = Finder->getASTContext().getParents(Node); llvm::SmallVector<ast_type_traits::DynTypedNode, 8> Stack(Parents.begin(), Parents.end()); while(!Stack.empty()) { const auto &CurNode = Stack.back(); Stack.pop_back(); if(const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) { if(InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) { return true; } } else if(const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) { if(InnerMatcher.matches(*LambdaExprNode->getCallOperator(), Finder, Builder)) { return true; } } else { for(const auto &Parent: Finder->getASTContext().getParents(CurNode)) Stack.push_back(Parent); } } return false; } /// Matches a declaration that has external formal linkage. /// /// Example matches only z (matcher = varDecl(hasExternalFormalLinkage())) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode /// /// Example matches f() because it has external formal linkage despite being /// unique to the translation unit as though it has internal likage /// (matcher = functionDecl(hasExternalFormalLinkage())) /// /// \code /// namespace { /// void f() {} /// } /// \endcode AST_MATCHER(NamedDecl, hasExternalFormalLinkage) { return Node.hasExternalFormalLinkage(); } /// Matches a declaration that has default arguments. 
/// /// Example matches y (matcher = parmVarDecl(hasDefaultArgument())) /// \code /// void x(int val) {} /// void y(int val = 0) {} /// \endcode AST_MATCHER(ParmVarDecl, hasDefaultArgument) { return Node.hasDefaultArg(); } /// Matches array new expressions. /// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(isArray()) /// matches the expression 'new MyClass[10]'. AST_MATCHER(CXXNewExpr, isArray) { return Node.isArray(); } /// Matches array new expressions with a given array size. /// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(hasArraySize(intgerLiteral(equals(10)))) /// matches the expression 'new MyClass[10]'. AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) { return Node.isArray() && *Node.getArraySize() && InnerMatcher.matches(**Node.getArraySize(), Finder, Builder); } /// Matches a class declaration that is defined. /// /// Example matches x (matcher = cxxRecordDecl(hasDefinition())) /// \code /// class x {}; /// class y; /// \endcode AST_MATCHER(CXXRecordDecl, hasDefinition) { return Node.hasDefinition(); } /// Matches C++11 scoped enum declaration. /// /// Example matches Y (matcher = enumDecl(isScoped())) /// \code /// enum X {}; /// enum class Y {}; /// \endcode AST_MATCHER(EnumDecl, isScoped) { return Node.isScoped(); } /// Matches a function declared with a trailing return type. /// /// Example matches Y (matcher = functionDecl(hasTrailingReturn())) /// \code /// int X() {} /// auto Y() -> int {} /// \endcode AST_MATCHER(FunctionDecl, hasTrailingReturn) { if (const auto *F = Node.getType()->getAs<FunctionProtoType>()) return F->hasTrailingReturn(); return false; } /// Matches expressions that match InnerMatcher that are possibly wrapped in an /// elidable constructor and other corresponding bookkeeping nodes. /// /// In C++17, elidable copy constructors are no longer being generated in the /// AST as it is not permitted by the standard. 
They are, however, part of the /// AST in C++14 and earlier. So, a matcher must abstract over these differences /// to work in all language modes. This matcher skips elidable constructor-call /// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and /// various implicit nodes inside the constructor calls, all of which will not /// appear in the C++17 AST. /// /// Given /// /// \code /// struct H {}; /// H G(); /// void f() { /// H D = G(); /// } /// \endcode /// /// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))`` /// matches ``H D = G()`` in C++11 through C++17 (and beyond). AST_MATCHER_P(Expr, ignoringElidableConstructorCall, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { // E tracks the node that we are examining. const Expr *E = &Node; // If present, remove an outer `ExprWithCleanups` corresponding to the // underlying `CXXConstructExpr`. This check won't cover all cases of added // `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the // EWC is placed on the outermost node of the expression, which this may not // be), but, it still improves the coverage of this matcher. if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node)) E = CleanupsExpr->getSubExpr(); if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) { if (CtorExpr->isElidable()) { if (const auto *MaterializeTemp = dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) { return InnerMatcher.matches(*MaterializeTemp->GetTemporaryExpr(), Finder, Builder); } } } return InnerMatcher.matches(Node, Finder, Builder); } //----------------------------------------------------------------------------// // OpenMP handling. //----------------------------------------------------------------------------// /// Matches any ``#pragma omp`` executable directive. 
/// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective()`` matches ``omp parallel``, /// ``omp parallel default(none)`` and ``omp taskyield``. extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective> ompExecutableDirective; /// Matches standalone OpenMP directives, /// i.e., directives that can't have a structured block. /// /// Given /// /// \code /// #pragma omp parallel /// {} /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective(isStandaloneDirective()))`` matches /// ``omp taskyield``. AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) { return Node.isStandaloneDirective(); } /// Matches the Stmt AST node that is marked as being the structured-block /// of an OpenMP executable directive. /// /// Given /// /// \code /// #pragma omp parallel /// {} /// \endcode /// /// ``stmt(isOMPStructuredBlock()))`` matches ``{}``. AST_MATCHER(Stmt, isOMPStructuredBlock) { return Node.isOMPStructuredBlock(); } /// Matches the structured-block of the OpenMP executable directive /// /// Prerequisite: the executable directive must not be standalone directive. /// If it is, it will never match. /// /// Given /// /// \code /// #pragma omp parallel /// ; /// #pragma omp parallel /// {} /// \endcode /// /// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;`` AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock, internal::Matcher<Stmt>, InnerMatcher) { if (Node.isStandaloneDirective()) return false; // Standalone directives have no structured blocks. return InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder); } /// Matches any clause in an OpenMP directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// \endcode /// /// ``ompExecutableDirective(hasAnyClause(anything()))`` matches /// ``omp parallel default(none)``. 
AST_MATCHER_P(OMPExecutableDirective, hasAnyClause, internal::Matcher<OMPClause>, InnerMatcher) { ArrayRef<OMPClause *> Clauses = Node.clauses(); return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(), Clauses.end(), Finder, Builder); } /// Matches OpenMP ``default`` clause. /// /// Given /// /// \code /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel /// \endcode /// /// ``ompDefaultClause()`` matches ``default(none)`` and ``default(shared)``. extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause> ompDefaultClause; /// Matches if the OpenMP ``default`` clause has ``none`` kind specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// \endcode /// /// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``. AST_MATCHER(OMPDefaultClause, isNoneKind) { return Node.getDefaultKind() == OMPC_DEFAULT_none; } /// Matches if the OpenMP ``default`` clause has ``shared`` kind specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// \endcode /// /// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``. AST_MATCHER(OMPDefaultClause, isSharedKind) { return Node.getDefaultKind() == OMPC_DEFAULT_shared; } /// Matches if the OpenMP directive is allowed to contain the specified OpenMP /// clause kind. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel for /// #pragma omp for /// \endcode /// /// `ompExecutableDirective(isAllowedToContainClause(OMPC_default))`` matches /// ``omp parallel`` and ``omp parallel for``. /// /// If the matcher is use from clang-query, ``OpenMPClauseKind`` parameter /// should be passed as a quoted string. 
e.g., /// ``isAllowedToContainClauseKind("OMPC_default").`` AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind, OpenMPClauseKind, CKind) { return isAllowedClauseForDirective(Node.getDirectiveKind(), CKind); } //----------------------------------------------------------------------------// // End OpenMP handling. //----------------------------------------------------------------------------// } // namespace ast_matchers } // namespace clang #endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 8; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,16);t1++) { lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32)); ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(32*t2-Nz-4,8)),2*t1);t3<=min(min(min(floord(Nt+Ny-4,8),floord(16*t1+Ny+29,8)),floord(32*t2+Ny+28,8)),floord(32*t1-32*t2+Nz+Ny+27,8));t3++) { for (t4=max(max(max(0,ceild(t1-63,64)),ceild(32*t2-Nz-1020,1024)),ceild(8*t3-Ny-1020,1024));t4<=min(min(min(min(floord(Nt+Nx-4,1024),floord(16*t1+Nx+29,1024)),floord(32*t2+Nx+28,1024)),floord(8*t3+Nx+4,1024)),floord(32*t1-32*t2+Nz+Nx+27,1024));t4++) { for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),8*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),8*t3+6),1024*t4+1022),32*t1-32*t2+Nz+29);t5++) { for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) { lbv=max(1024*t4,t5+1); ubv=min(1024*t4+1023,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 
1]));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
perftest.c
/** * Copyright (C) Mellanox Technologies Ltd. 2001-2014. ALL RIGHTS RESERVED. * Copyright (C) The University of Tennessee and The University * of Tennessee Research Foundation. 2015. ALL RIGHTS RESERVED. * Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED. * * See file LICENSE for terms. */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include "libperf.h" #include "libperf_int.h" #include <ucs/sys/string.h> #include <ucs/sys/sys.h> #include <ucs/debug/log.h> #include <sys/socket.h> #include <arpa/inet.h> #include <stdlib.h> #include <stdio.h> #include <unistd.h> #include <netdb.h> #include <getopt.h> #include <string.h> #include <sys/types.h> #include <locale.h> #if HAVE_MPI # include <mpi.h> #elif HAVE_RTE # include<rte.h> #endif #define MAX_BATCH_FILES 32 enum { TEST_FLAG_PRINT_RESULTS = UCS_BIT(0), TEST_FLAG_PRINT_TEST = UCS_BIT(1), TEST_FLAG_SET_AFFINITY = UCS_BIT(8), TEST_FLAG_NUMERIC_FMT = UCS_BIT(9), TEST_FLAG_PRINT_FINAL = UCS_BIT(10), TEST_FLAG_PRINT_CSV = UCS_BIT(11) }; typedef struct sock_rte_group { int is_server; int connfd; } sock_rte_group_t; typedef struct test_type { const char *name; ucx_perf_api_t api; ucx_perf_cmd_t command; ucx_perf_test_type_t test_type; const char *desc; } test_type_t; struct perftest_context { ucx_perf_params_t params; const char *server_addr; int port; #if HAVE_MPI int mpi; #endif unsigned cpu; unsigned flags; unsigned num_batch_files; char *batch_files[MAX_BATCH_FILES]; char *test_names[MAX_BATCH_FILES]; sock_rte_group_t sock_rte_group; }; #define TEST_PARAMS_ARGS "t:n:s:W:O:w:D:i:H:oSCqM:T:d:x:A:B" test_type_t tests[] = { {"am_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_PINGPONG, "active message latency"}, {"put_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG, "put latency"}, {"add_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_PINGPONG, "atomic add latency"}, {"get", UCX_PERF_API_UCT, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI, "get latency / bandwidth 
/ message rate"}, {"fadd", UCX_PERF_API_UCT, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic fetch-and-add latency / message rate"}, {"swap", UCX_PERF_API_UCT, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic swap latency / message rate"}, {"cswap", UCX_PERF_API_UCT, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic compare-and-swap latency / message rate"}, {"am_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_STREAM_UNI, "active message bandwidth / message rate"}, {"put_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI, "put bandwidth / message rate"}, {"add_mr", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic add message rate"}, {"tag_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_PINGPONG, "UCP tag match latency"}, {"tag_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_STREAM_UNI, "UCP tag match bandwidth"}, {"ucp_put_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG, "UCP put latency"}, {"ucp_put_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI, "UCP put bandwidth"}, {"ucp_get", UCX_PERF_API_UCP, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI, "UCP get latency / bandwidth / message rate"}, {"ucp_add", UCX_PERF_API_UCP, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "UCP atomic add bandwidth / message rate"}, {"ucp_fadd", UCX_PERF_API_UCP, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "UCP atomic fetch-and-add latency / bandwidth / message rate"}, {"ucp_swap", UCX_PERF_API_UCP, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "UCP atomic swap latency / bandwidth / message rate"}, {"ucp_cswap", UCX_PERF_API_UCP, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "UCP atomic compare-and-swap latency / bandwidth / message rate"}, {NULL} }; static int safe_send(int sock, void *data, size_t size) { size_t total = 0; int ret; while (total < size) { ret = send(sock, (char*)data + total, size - total, 0); if 
(ret < 0) { ucs_error("send() failed: %m"); return -1; } total += ret; } return 0; } static int safe_recv(int sock, void *data, size_t size) { size_t total = 0; int ret; while (total < size) { ret = recv(sock, (char*)data + total, size - total, 0); if (ret < 0) { ucs_error("recv() failed: %m"); return -1; } total += ret; } return 0; } static void print_progress(char **test_names, unsigned num_names, const ucx_perf_result_t *result, unsigned flags, int final) { static const char *fmt_csv = "%.0f,%.3f,%.3f,%.3f,%.2f,%.2f,%.0f,%.0f\n"; static const char *fmt_numeric = "%'14.0f %9.3f %9.3f %9.3f %10.2f %10.2f %'11.0f %'11.0f\n"; static const char *fmt_plain = "%14.0f %9.3f %9.3f %9.3f %10.2f %10.2f %11.0f %11.0f\n"; unsigned i; if (!(flags & TEST_FLAG_PRINT_RESULTS) || (!final && (flags & TEST_FLAG_PRINT_FINAL))) { return; } if (flags & TEST_FLAG_PRINT_CSV) { for (i = 0; i < num_names; ++i) { printf("%s,", test_names[i]); } } printf((flags & TEST_FLAG_PRINT_CSV) ? fmt_csv : (flags & TEST_FLAG_NUMERIC_FMT) ? 
fmt_numeric : fmt_plain, (double)result->iters, result->latency.typical * 1000000.0, result->latency.moment_average * 1000000.0, result->latency.total_average * 1000000.0, result->bandwidth.moment_average / (1024.0 * 1024.0), result->bandwidth.total_average / (1024.0 * 1024.0), result->msgrate.moment_average, result->msgrate.total_average); fflush(stdout); } static void print_header(struct perftest_context *ctx) { const char *test_api_str; const char *test_data_str; test_type_t *test; unsigned i; if (ctx->flags & TEST_FLAG_PRINT_TEST) { for (test = tests; test->name; ++test) { if ((test->command == ctx->params.command) && (test->test_type == ctx->params.test_type)) { break; } } if (test->name != NULL) { if (test->api == UCX_PERF_API_UCT) { test_api_str = "transport layer"; switch (ctx->params.uct.data_layout) { case UCT_PERF_DATA_LAYOUT_SHORT: test_data_str = "short"; break; case UCT_PERF_DATA_LAYOUT_BCOPY: test_data_str = "bcopy"; break; case UCT_PERF_DATA_LAYOUT_ZCOPY: test_data_str = "zcopy"; break; default: test_data_str = "(undefined)"; break; } } else if (test->api == UCX_PERF_API_UCP) { test_api_str = "protocol layer"; test_data_str = "(automatic)"; /* TODO contig/stride/stream */ } else { return; } printf("+------------------------------------------------------------------------------------------+\n"); printf("| API: %-60s |\n", test_api_str); printf("| Test: %-60s |\n", test->desc); printf("| Data layout: %-60s |\n", test_data_str); printf("| Message size: %-60zu |\n", ucx_perf_get_message_size(&ctx->params)); } } if (ctx->flags & TEST_FLAG_PRINT_CSV) { if (ctx->flags & TEST_FLAG_PRINT_RESULTS) { for (i = 0; i < ctx->num_batch_files; ++i) { printf("%s,", basename(ctx->batch_files[i])); } printf("iterations,typical_lat,avg_lat,overall_lat,avg_bw,overall_bw,avg_mr,overall_mr\n"); } } else { if (ctx->flags & TEST_FLAG_PRINT_RESULTS) { printf("+--------------+-----------------------------+---------------------+-----------------------+\n"); printf("| | latency 
(usec) | bandwidth (MB/s) | message rate (msg/s) |\n"); printf("+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n"); printf("| # iterations | typical | average | overall | average | overall | average | overall |\n"); printf("+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n"); } else if (ctx->flags & TEST_FLAG_PRINT_TEST) { printf("+------------------------------------------------------------------------------------------+\n"); } } } static void print_test_name(struct perftest_context *ctx) { char buf[200]; unsigned i, pos; if (!(ctx->flags & TEST_FLAG_PRINT_CSV) && (ctx->num_batch_files > 0)) { strcpy(buf, "+--------------+---------+---------+---------+----------+----------+-----------+-----------+"); pos = 1; for (i = 0; i < ctx->num_batch_files; ++i) { if (i != 0) { buf[pos++] = '/'; } memcpy(&buf[pos], ctx->test_names[i], ucs_min(strlen(ctx->test_names[i]), sizeof(buf) - pos - 1)); pos += strlen(ctx->test_names[i]); } if (ctx->flags & TEST_FLAG_PRINT_RESULTS) { printf("%s\n", buf); } } } static void usage(struct perftest_context *ctx, const char *program) { test_type_t *test; printf("Usage: %s [ server-hostname ] [ options ]\n", program); printf("\n"); #if HAVE_MPI printf("This test can be also launched as an MPI application\n"); #elif HAVE_RTE printf("This test can be also launched as an libRTE application\n"); #endif printf(" Common options:\n"); printf("\n"); printf(" Test options:\n"); printf(" -t <test> Test to run.\n"); for (test = tests; test->name; ++test) { printf(" %11s : %s.\n", test->name, test->desc); } printf("\n"); printf(" -D <layout>[,<layout>] Data layout. Default is \"short\" in UCT," " \"contig\" in UCP and previous one " "in batch mode. 
Second parameter is for " "receive side in UCP only.\n"); printf(" short : Use short messages API (cannot used for get).\n"); printf(" bcopy : Use copy-out API (cannot used for atomics).\n"); printf(" zcopy : Use zero-copy API (cannot used for atomics).\n"); printf(" contig : Use continuous datatype in UCP tests.\n"); printf(" iov : Use IOV datatype in UCP tests.\n"); printf("\n"); printf(" -d <device> Device to use for testing.\n"); printf(" -x <tl> Transport to use for testing.\n"); printf(" -c <cpu> Set affinity to this CPU. (off)\n"); printf(" -n <iters> Number of iterations to run. (%ld)\n", ctx->params.max_iter); printf(" -s <size> List of buffer sizes separated by comma, which " "make up a single message. Default is (%zu). " "For example, \"-s 16,48,8192,8192,14\"\n", ctx->params.msg_size_list[0]); printf(" -H <size> AM Header size. (%zu)\n", ctx->params.am_hdr_size); printf(" -w <iters> Number of warm-up iterations. (%zu)\n", ctx->params.warmup_iter); printf(" -W <count> Flow control window size, for active messages. (%u)\n", ctx->params.uct.fc_window); printf(" -O <count> Maximal number of uncompleted outstanding sends. (%u)\n", ctx->params.max_outstanding); printf(" -i <count> Distance between starting address of consecutive " "IOV entries. The same as UCT uct_iov_t stride.\n"); printf(" -N Use numeric formatting - thousands separator.\n"); printf(" -f Print only final numbers.\n"); printf(" -v Print CSV-formatted output.\n"); printf(" -p <port> TCP port to use for data exchange. (%d)\n", ctx->port); printf(" -b <batchfile> Batch mode. Read and execute tests from a file.\n"); printf(" Every line of the file is a test to run. 
" "The first word is the\n"); printf(" test name, and the rest are command-line " "arguments for the test.\n"); printf(" -M <thread> Thread support level for progress engine (single).\n"); printf(" single : Only the master thread can access.\n"); printf(" serialized : One thread can access at a time.\n"); printf(" multi : Multiple threads can access.\n"); printf(" -T <threads> Number of threads in the test (1); " "also implies \"-M multi\".\n"); printf(" -A <mode> Async progress mode. (thread)\n"); printf(" thread : Use separate progress thread.\n"); printf(" signal : Use signal based timer.\n"); printf(" -B Register memory with NONBLOCK flag.\n"); printf(" -C Use wildcard for tag tests.\n"); printf(" -S Use synchronous mode for tag sends.\n"); #if HAVE_MPI printf(" -P <0|1> Disable/enable MPI mode (%d)\n", ctx->mpi); #endif printf(" -h Show this help message.\n"); printf("\n"); } static const char *__basename(const char *path) { const char *p = strrchr(path, '/'); return (p == NULL) ? path : p; } static ucs_status_t parse_ucp_datatype_params(const char *optarg, ucp_perf_datatype_t *datatype) { const char *iov_type = "iov"; const size_t iov_type_size = strlen("iov"); const char *contig_type = "contig"; const size_t contig_type_size = strlen("contig"); if (0 == strncmp(optarg, iov_type, iov_type_size)) { *datatype = UCP_PERF_DATATYPE_IOV; } else if (0 == strncmp(optarg, contig_type, contig_type_size)) { *datatype = UCP_PERF_DATATYPE_CONTIG; } else { return UCS_ERR_INVALID_PARAM; } return UCS_OK; } static ucs_status_t parse_message_sizes_params(const char *optarg, ucx_perf_params_t *params) { char *optarg_ptr, *optarg_ptr2; size_t token_num, token_it; const char delim = ','; optarg_ptr = (char *)optarg; token_num = 0; /* count the number of given message sizes */ while ((optarg_ptr = strchr(optarg_ptr, delim)) != NULL) { ++optarg_ptr; ++token_num; } ++token_num; free(params->msg_size_list); /* free previously allocated buffer */ params->msg_size_list = 
malloc(sizeof(*params->msg_size_list) * token_num); if (NULL == params->msg_size_list) { return UCS_ERR_NO_MEMORY; } optarg_ptr = (char *)optarg; errno = 0; for (token_it = 0; token_it < token_num; ++token_it) { params->msg_size_list[token_it] = strtoul(optarg_ptr, &optarg_ptr2, 10); if (((ERANGE == errno) && (ULONG_MAX == params->msg_size_list[token_it])) || ((errno != 0) && (params->msg_size_list[token_it] == 0)) || (optarg_ptr == optarg_ptr2)) { free(params->msg_size_list); params->msg_size_list = NULL; /* prevent double free */ ucs_error("Invalid option substring argument at position %lu", token_it); return UCS_ERR_INVALID_PARAM; } optarg_ptr = optarg_ptr2 + 1; } params->msg_size_cnt = token_num; return UCS_OK; } static void init_test_params(ucx_perf_params_t *params) { params->api = UCX_PERF_API_LAST; params->command = UCX_PERF_CMD_LAST; params->test_type = UCX_PERF_TEST_TYPE_LAST; params->thread_mode = UCS_THREAD_MODE_SINGLE; params->thread_count = 1; params->async_mode = UCS_ASYNC_MODE_THREAD; params->wait_mode = UCX_PERF_WAIT_MODE_LAST; params->max_outstanding = 1; params->warmup_iter = 10000; params->am_hdr_size = 8; params->alignment = ucs_get_page_size(); params->max_iter = 1000000l; params->max_time = 0.0; params->report_interval = 1.0; params->flags = UCX_PERF_TEST_FLAG_VERBOSE; params->uct.fc_window = UCT_PERF_TEST_MAX_FC_WINDOW; params->uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT; params->msg_size_cnt = 1; params->iov_stride = 0; params->ucp.send_datatype = UCP_PERF_DATATYPE_CONTIG; params->ucp.recv_datatype = UCP_PERF_DATATYPE_CONTIG; strcpy(params->uct.dev_name, "<none>"); strcpy(params->uct.tl_name, "<none>"); params->msg_size_list = malloc(sizeof(*params->msg_size_list) * params->msg_size_cnt); params->msg_size_list[0] = 8; } static ucs_status_t parse_test_params(ucx_perf_params_t *params, char opt, const char *optarg) { test_type_t *test; char *optarg2 = NULL; switch (opt) { case 'd': ucs_snprintf_zero(params->uct.dev_name, 
sizeof(params->uct.dev_name), "%s", optarg); return UCS_OK; case 'x': ucs_snprintf_zero(params->uct.tl_name, sizeof(params->uct.tl_name), "%s", optarg); return UCS_OK; case 't': for (test = tests; test->name; ++test) { if (!strcmp(optarg, test->name)) { params->api = test->api; params->command = test->command; params->test_type = test->test_type; break; } } if (test->name == NULL) { ucs_error("Invalid option argument for -t"); return UCS_ERR_INVALID_PARAM; } return UCS_OK; case 'D': if (0 == strcmp(optarg, "short")) { params->uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT; } else if (0 == strcmp(optarg, "bcopy")) { params->uct.data_layout = UCT_PERF_DATA_LAYOUT_BCOPY; } else if (0 == strcmp(optarg, "zcopy")) { params->uct.data_layout = UCT_PERF_DATA_LAYOUT_ZCOPY; } else if (UCS_OK == parse_ucp_datatype_params(optarg, &params->ucp.send_datatype)) { optarg2 = strchr(optarg, ','); if (optarg2) { if (UCS_OK != parse_ucp_datatype_params(optarg2 + 1, &params->ucp.recv_datatype)) { return -1; } } } else { ucs_error("Invalid option argument for -D"); return -1; } return UCS_OK; case 'i': params->iov_stride = atol(optarg); return UCS_OK; case 'n': params->max_iter = atol(optarg); return UCS_OK; case 's': return parse_message_sizes_params(optarg, params); case 'H': params->am_hdr_size = atol(optarg); return UCS_OK; case 'W': params->uct.fc_window = atoi(optarg); return UCS_OK; case 'O': params->max_outstanding = atoi(optarg); return UCS_OK; case 'w': params->warmup_iter = atol(optarg); return UCS_OK; case 'o': params->flags |= UCX_PERF_TEST_FLAG_ONE_SIDED; return UCS_OK; case 'B': params->flags |= UCX_PERF_TEST_FLAG_MAP_NONBLOCK; return UCS_OK; case 'q': params->flags &= ~UCX_PERF_TEST_FLAG_VERBOSE; return UCS_OK; case 'C': params->flags |= UCX_PERF_TEST_FLAG_TAG_WILDCARD; return UCS_OK; case 'S': params->flags |= UCX_PERF_TEST_FLAG_TAG_SYNC; return UCS_OK; case 'M': if (0 == strcmp(optarg, "single")) { params->thread_mode = UCS_THREAD_MODE_SINGLE; return UCS_OK; } else if (0 
== strcmp(optarg, "serialized")) { params->thread_mode = UCS_THREAD_MODE_SERIALIZED; return UCS_OK; } else if (0 == strcmp(optarg, "multi")) { params->thread_mode = UCS_THREAD_MODE_MULTI; return UCS_OK; } else { ucs_error("Invalid option argument for -M"); return UCS_ERR_INVALID_PARAM; } case 'T': params->thread_count = atoi(optarg); params->thread_mode = UCS_THREAD_MODE_MULTI; return UCS_OK; case 'A': if (0 == strcmp(optarg, "thread")) { params->async_mode = UCS_ASYNC_MODE_THREAD; return UCS_OK; } else if (0 == strcmp(optarg, "signal")) { params->async_mode = UCS_ASYNC_MODE_SIGNAL; return UCS_OK; } else { ucs_error("Invalid option argument for -A"); return UCS_ERR_INVALID_PARAM; } default: return UCS_ERR_INVALID_PARAM; } } static ucs_status_t read_batch_file(FILE *batch_file, ucx_perf_params_t *params, char** test_name_p) { #define MAX_SIZE 256 #define MAX_ARG_SIZE 2048 ucs_status_t status; char buf[MAX_ARG_SIZE]; int argc; char *argv[MAX_SIZE + 1]; int c; char *p; do { if (fgets(buf, sizeof(buf) - 1, batch_file) == NULL) { return UCS_ERR_NO_ELEM; } argc = 0; p = strtok(buf, " \t\n\r"); while (p && (argc < MAX_SIZE)) { argv[argc++] = p; p = strtok(NULL, " \t\n\r"); } argv[argc] = NULL; } while ((argc == 0) || (argv[0][0] == '#')); optind = 1; while ((c = getopt (argc, argv, TEST_PARAMS_ARGS)) != -1) { status = parse_test_params(params, c, optarg); if (status != UCS_OK) { ucs_error("Invalid argument in batch file: -%c, status(%d):\"%s\"", c, status, ucs_status_string(status)); return status; } } *test_name_p = strdup(argv[0]); return UCS_OK; } static ucs_status_t parse_opts(struct perftest_context *ctx, int argc, char **argv) { ucs_status_t status; int c; ucs_trace_func(""); init_test_params(&ctx->params); ctx->server_addr = NULL; ctx->num_batch_files = 0; ctx->port = 13337; ctx->flags = 0; #if HAVE_MPI ctx->mpi = !isatty(0); #endif optind = 1; while ((c = getopt (argc, argv, "p:b:Nfvc:P:h" TEST_PARAMS_ARGS)) != -1) { switch (c) { case 'p': ctx->port = 
atoi(optarg); break; case 'b': if (ctx->num_batch_files < MAX_BATCH_FILES) { ctx->batch_files[ctx->num_batch_files++] = strdup(optarg); } break; case 'N': ctx->flags |= TEST_FLAG_NUMERIC_FMT; break; case 'f': ctx->flags |= TEST_FLAG_PRINT_FINAL; break; case 'v': ctx->flags |= TEST_FLAG_PRINT_CSV; break; case 'c': ctx->flags |= TEST_FLAG_SET_AFFINITY; ctx->cpu = atoi(optarg); break; case 'P': #if HAVE_MPI ctx->mpi = atoi(optarg); break; #endif case 'h': usage(ctx, __basename(argv[0])); return UCS_ERR_CANCELED; default: status = parse_test_params(&ctx->params, c, optarg); if (status != UCS_OK) { usage(ctx, __basename(argv[0])); return status; } break; } } if (optind < argc) { ctx->server_addr = argv[optind]; } return UCS_OK; } static unsigned sock_rte_group_size(void *rte_group) { return 2; } static unsigned sock_rte_group_index(void *rte_group) { sock_rte_group_t *group = rte_group; return group->is_server ? 0 : 1; } static void sock_rte_barrier(void *rte_group) { #pragma omp master { sock_rte_group_t *group = rte_group; const unsigned magic = 0xdeadbeef; unsigned sync; sync = magic; safe_send(group->connfd, &sync, sizeof(unsigned)); sync = 0; safe_recv(group->connfd, &sync, sizeof(unsigned)); ucs_assert(sync == magic); } #pragma omp barrier } static void sock_rte_post_vec(void *rte_group, const struct iovec *iovec, int iovcnt, void **req) { sock_rte_group_t *group = rte_group; size_t size; int i; size = 0; for (i = 0; i < iovcnt; ++i) { size += iovec[i].iov_len; } safe_send(group->connfd, &size, sizeof(size)); for (i = 0; i < iovcnt; ++i) { safe_send(group->connfd, iovec[i].iov_base, iovec[i].iov_len); } } static void sock_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max, void *req) { sock_rte_group_t *group = rte_group; int group_index; size_t size; group_index = sock_rte_group_index(rte_group); if (src == group_index) { return; } ucs_assert_always(src == (1 - group_index)); safe_recv(group->connfd, &size, sizeof(size)); ucs_assert_always(size <= 
max); safe_recv(group->connfd, buffer, size); } static void sock_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, int is_final) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags, is_final); } static ucx_perf_rte_t sock_rte = { .group_size = sock_rte_group_size, .group_index = sock_rte_group_index, .barrier = sock_rte_barrier, .post_vec = sock_rte_post_vec, .recv = sock_rte_recv, .exchange_vec = (void*)ucs_empty_function, .report = sock_rte_report, }; static ucs_status_t setup_sock_rte(struct perftest_context *ctx) { struct sockaddr_in inaddr; struct hostent *he; ucs_status_t status; int optval = 1; int sockfd, connfd; int ret; sockfd = socket(AF_INET, SOCK_STREAM, 0); if (sockfd < 0) { ucs_error("socket() failed: %m"); status = UCS_ERR_IO_ERROR; goto err; } if (ctx->server_addr == NULL) { optval = 1; ret = setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval)); if (ret < 0) { ucs_error("setsockopt(SO_REUSEADDR) failed: %m"); status = UCS_ERR_INVALID_PARAM; goto err_close_sockfd; } inaddr.sin_family = AF_INET; inaddr.sin_port = htons(ctx->port); inaddr.sin_addr.s_addr = INADDR_ANY; memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero)); ret = bind(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr)); if (ret < 0) { ucs_error("bind() failed: %m"); status = UCS_ERR_INVALID_ADDR; goto err_close_sockfd; } ret = listen(sockfd, 10); if (ret < 0) { ucs_error("listen() failed: %m"); status = UCS_ERR_IO_ERROR; goto err_close_sockfd; } printf("Waiting for connection...\n"); /* Accept next connection */ connfd = accept(sockfd, NULL, NULL); if (connfd < 0) { ucs_error("accept() failed: %m"); status = UCS_ERR_IO_ERROR; goto err_close_sockfd; } close(sockfd); safe_recv(connfd, &ctx->params, sizeof(ctx->params)); if (ctx->params.msg_size_cnt) { ctx->params.msg_size_list = malloc(sizeof(*ctx->params.msg_size_list) * ctx->params.msg_size_cnt); if (NULL == ctx->params.msg_size_list) { status 
= UCS_ERR_NO_MEMORY; goto err_close_connfd; } safe_recv(connfd, ctx->params.msg_size_list, sizeof(*ctx->params.msg_size_list) * ctx->params.msg_size_cnt); } ctx->sock_rte_group.connfd = connfd; ctx->sock_rte_group.is_server = 1; } else { he = gethostbyname(ctx->server_addr); if (he == NULL || he->h_addr_list == NULL) { ucs_error("host %s not found: %s", ctx->server_addr, hstrerror(h_errno)); status = UCS_ERR_INVALID_ADDR; goto err_close_sockfd; } inaddr.sin_family = he->h_addrtype; inaddr.sin_port = htons(ctx->port); ucs_assert(he->h_length == sizeof(inaddr.sin_addr)); memcpy(&inaddr.sin_addr, he->h_addr_list[0], he->h_length); memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero)); ret = connect(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr)); if (ret < 0) { ucs_error("connect() failed: %m"); status = UCS_ERR_UNREACHABLE; goto err_close_sockfd; } safe_send(sockfd, &ctx->params, sizeof(ctx->params)); if (ctx->params.msg_size_cnt) { safe_send(sockfd, ctx->params.msg_size_list, sizeof(*ctx->params.msg_size_list) * ctx->params.msg_size_cnt); } ctx->sock_rte_group.connfd = sockfd; ctx->sock_rte_group.is_server = 0; } if (ctx->sock_rte_group.is_server) { ctx->flags |= TEST_FLAG_PRINT_TEST; } else { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } ctx->params.rte_group = &ctx->sock_rte_group; ctx->params.rte = &sock_rte; ctx->params.report_arg = ctx; return UCS_OK; err_close_connfd: close(connfd); goto err; err_close_sockfd: close(sockfd); err: return status; } static ucs_status_t cleanup_sock_rte(struct perftest_context *ctx) { close(ctx->sock_rte_group.connfd); return UCS_OK; } #if HAVE_MPI static unsigned mpi_rte_group_size(void *rte_group) { int size; MPI_Comm_size(MPI_COMM_WORLD, &size); return size; } static unsigned mpi_rte_group_index(void *rte_group) { int rank; MPI_Comm_rank(MPI_COMM_WORLD, &rank); return rank; } static void mpi_rte_barrier(void *rte_group) { #pragma omp master MPI_Barrier(MPI_COMM_WORLD); #pragma omp barrier } static void mpi_rte_post_vec(void 
*rte_group, const struct iovec *iovec, int iovcnt, void **req) { int group_size; int my_rank; int dest, i; MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); MPI_Comm_size(MPI_COMM_WORLD, &group_size); for (dest = 0; dest < group_size; ++dest) { if (dest == my_rank) { continue; } for (i = 0; i < iovcnt; ++i) { MPI_Send(iovec[i].iov_base, iovec[i].iov_len, MPI_BYTE, dest, i == (iovcnt - 1), /* Send last iov with tag == 1 */ MPI_COMM_WORLD); } } *req = (void*)(uintptr_t)1; } static void mpi_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max, void *req) { MPI_Status status; size_t offset; int my_rank; int count; MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); if (src == my_rank) { return; } offset = 0; do { ucs_assert_always(offset < max); MPI_Recv(buffer + offset, max - offset, MPI_BYTE, src, MPI_ANY_TAG, MPI_COMM_WORLD, &status); MPI_Get_count(&status, MPI_BYTE, &count); offset += count; } while (status.MPI_TAG != 1); } static void mpi_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, int is_final) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags, is_final); } static ucx_perf_rte_t mpi_rte = { .group_size = mpi_rte_group_size, .group_index = mpi_rte_group_index, .barrier = mpi_rte_barrier, .post_vec = mpi_rte_post_vec, .recv = mpi_rte_recv, .exchange_vec = (void*)ucs_empty_function, .report = mpi_rte_report, }; #elif HAVE_RTE static unsigned ext_rte_group_size(void *rte_group) { rte_group_t group = (rte_group_t)rte_group; return rte_group_size(group); } static unsigned ext_rte_group_index(void *rte_group) { rte_group_t group = (rte_group_t)rte_group; return rte_group_rank(group); } static void ext_rte_barrier(void *rte_group) { #pragma omp master { rte_group_t group = (rte_group_t)rte_group; int rc; rc = rte_barrier(group); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_barrier"); } } #pragma omp barrier } static void ext_rte_post_vec(void *rte_group, const struct iovec* iovec, int 
iovcnt, void **req) { rte_group_t group = (rte_group_t)rte_group; rte_srs_session_t session; rte_iovec_t *r_vec; int i, rc; rc = rte_srs_session_create(group, 0, &session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_session_create"); } r_vec = calloc(iovcnt, sizeof(rte_iovec_t)); if (r_vec == NULL) { return; } for (i = 0; i < iovcnt; ++i) { r_vec[i].iov_base = iovec[i].iov_base; r_vec[i].type = rte_datatype_uint8_t; r_vec[i].count = iovec[i].iov_len; } rc = rte_srs_set_data(session, "KEY_PERF", r_vec, iovcnt); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_set_data"); } *req = session; free(r_vec); } static void ext_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max, void *req) { rte_group_t group = (rte_group_t)rte_group; rte_srs_session_t session = (rte_srs_session_t)req; void *rte_buffer = NULL; rte_iovec_t r_vec; uint32_t offset; int size; int rc; rc = rte_srs_get_data(session, rte_group_index_to_ec(group, src), "KEY_PERF", &rte_buffer, &size); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_get_data"); return; } r_vec.iov_base = buffer; r_vec.type = rte_datatype_uint8_t; r_vec.count = max; offset = 0; rte_unpack(&r_vec, rte_buffer, &offset); rc = rte_srs_session_destroy(session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_session_destroy"); } free(rte_buffer); } static void ext_rte_exchange_vec(void *rte_group, void * req) { rte_srs_session_t session = (rte_srs_session_t)req; int rc; rc = rte_srs_exchange_data(session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_exchange_data"); } } static void ext_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, int is_final) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags, is_final); } static ucx_perf_rte_t ext_rte = { .group_size = ext_rte_group_size, .group_index = ext_rte_group_index, .barrier = ext_rte_barrier, .report = ext_rte_report, .post_vec = ext_rte_post_vec, .recv = 
ext_rte_recv, .exchange_vec = ext_rte_exchange_vec, }; #endif static ucs_status_t setup_mpi_rte(struct perftest_context *ctx) { ucs_trace_func(""); #if HAVE_MPI int size, rank; MPI_Comm_size(MPI_COMM_WORLD, &size); if (size != 2) { ucs_error("This test should run with exactly 2 processes (actual: %d)", size); return UCS_ERR_INVALID_PARAM; } MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (rank == 1) { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } ctx->params.rte_group = NULL; ctx->params.rte = &mpi_rte; ctx->params.report_arg = ctx; #elif HAVE_RTE rte_group_t group; rte_init(NULL, NULL, &group); if (1 == rte_group_rank(group)) { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } ctx->params.rte_group = group; ctx->params.rte = &ext_rte; ctx->params.report_arg = ctx; #endif return UCS_OK; } static ucs_status_t cleanup_mpi_rte(struct perftest_context *ctx) { #if HAVE_MPI MPI_Finalize(); #elif HAVE_RTE rte_finalize(); #endif return UCS_OK; } static ucs_status_t check_system(struct perftest_context *ctx) { cpu_set_t cpuset; unsigned i, count, nr_cpus; int ret; ucs_trace_func(""); ret = sysconf(_SC_NPROCESSORS_CONF); if (ret < 0) { ucs_error("failed to get local cpu count: %m"); return UCS_ERR_INVALID_PARAM; } nr_cpus = ret; memset(&cpuset, 0, sizeof(cpuset)); if (ctx->flags & TEST_FLAG_SET_AFFINITY) { if (ctx->cpu >= nr_cpus) { ucs_error("cpu (%u) ot of range (0..%u)", ctx->cpu, nr_cpus - 1); return UCS_ERR_INVALID_PARAM; } CPU_SET(ctx->cpu, &cpuset); ret = sched_setaffinity(0, sizeof(cpuset), &cpuset); if (ret) { ucs_warn("sched_setaffinity() failed: %m"); return UCS_ERR_INVALID_PARAM; } } else { ret = sched_getaffinity(0, sizeof(cpuset), &cpuset); if (ret) { ucs_warn("sched_getaffinity() failed: %m"); return UCS_ERR_INVALID_PARAM; } count = 0; for (i = 0; i < CPU_SETSIZE; ++i) { if (CPU_ISSET(i, &cpuset)) { ++count; } } if (count > 2) { ucs_warn("CPU affinity is not set (bound to %u cpus)." 
" Performance may be impacted.", count); } } return UCS_OK; } static ucs_status_t run_test_recurs(struct perftest_context *ctx, ucx_perf_params_t *parent_params, unsigned depth) { ucx_perf_params_t params; ucx_perf_result_t result; ucs_status_t status; FILE *batch_file; ucs_trace_func("depth=%u, num_files=%u", depth, ctx->num_batch_files); if (depth >= ctx->num_batch_files) { print_test_name(ctx); return ucx_perf_run(parent_params, &result); } batch_file = fopen(ctx->batch_files[depth], "r"); if (batch_file == NULL) { ucs_error("Failed to open batch file '%s': %m", ctx->batch_files[depth]); return UCS_ERR_IO_ERROR; } params = *parent_params; while ((status = read_batch_file(batch_file, &params, &ctx->test_names[depth])) == UCS_OK) { status = run_test_recurs(ctx, &params, depth + 1); free(ctx->test_names[depth]); if ((NULL == parent_params->msg_size_list) && (NULL != params.msg_size_list)) { free(params.msg_size_list); params.msg_size_list = NULL; } } fclose(batch_file); return UCS_OK; } static ucs_status_t run_test(struct perftest_context *ctx) { ucs_status_t status; ucs_trace_func(""); setlocale(LC_ALL, "en_US"); print_header(ctx); status = run_test_recurs(ctx, &ctx->params, 0); if (status != UCS_OK) { ucs_error("Failed to run test: %s", ucs_status_string(status)); } return status; } int main(int argc, char **argv) { struct perftest_context ctx; ucs_status_t status; int rte = 0; int ret; /* Parse command line */ if (parse_opts(&ctx, argc, argv) != UCS_OK) { ret = -127; goto out; } #ifdef __COVERITY__ /* coverity[dont_call] */ rte = rand(); /* Shut up deadcode error */ #endif #if HAVE_MPI /* Don't try MPI when running interactively */ if (ctx.mpi && (MPI_Init(&argc, &argv) == 0)) { rte = 1; } #elif HAVE_RTE rte = 1; #endif status = check_system(&ctx); if (status != UCS_OK) { ret = -1; goto out; } /* Create RTE */ status = (rte) ? 
setup_mpi_rte(&ctx) : setup_sock_rte(&ctx); if (status != UCS_OK) { ret = -1; goto out; } /* Run the test */ status = run_test(&ctx); if (status != UCS_OK) { ret = -1; goto out_cleanup_rte; } ret = 0; out_cleanup_rte: (rte) ? cleanup_mpi_rte(&ctx) : cleanup_sock_rte(&ctx); out: if (ctx.params.msg_size_list) { free(ctx.params.msg_size_list); } return ret; }
randFeature.c
#include <math.h> #include <time.h> #include <stdio.h> #include <limits.h> #include <stdlib.h> #include <string.h> #include <omp.h> #include "ReadLibSVM.h" #include "WriteLibSVM.h" #include "Feature_matrix.h" #include "Random.h" typedef enum { Gaussian, Laplace, ProdLaplace } KERNEL; int main(int argc, char* argv[]){ if(argc < 1+7){ printf("Usage: [inTrain] [inTest] [outTrain] [outTest] [r] [sigma] [KernelType]\n"); printf("KernelType: {Gauss, Laplace, ProdLaplace}\n"); return -1; } int i; //loop index int idx = 0; char* trainFile = argv[++idx]; char* testFile = argv[++idx]; char* trainOut = argv[++idx]; char* testOut = argv[++idx]; int r = atoi(argv[++idx]); double sigma = atof(argv[++idx]); char* Kernel = argv[++idx]; KERNEL mKernel; if (strcmp(Kernel, "Gauss") == 0) { mKernel = Gaussian; } else if (strcmp(Kernel, "Laplace") == 0) { mKernel = Laplace; } else if (strcmp(Kernel, "ProdLaplace") == 0) { mKernel = ProdLaplace; } else { printf("KRR_OneVsOne. Error: unidentified kernel type!\n"); return -1; } // Read in X = Xtrain (n*d), y = ytrain (n*1), // and X0 = Xtest (m*d), y0 = ytest (m*1) double *Xtrain = NULL, *Xtest = NULL; double *ytrain = NULL, *ytest = NULL; int n = 0, m = 0, d = 0; if (ReadLibSVM(trainFile, &Xtrain, &ytrain, &d, &n) == -1) { return -1; } if (ReadLibSVM(testFile, &Xtest, &ytest, &d, &m) == -1) { return -1; } printf("Generate fourier feature: finish loading data\n"); // Seed the random number genrator int Seed = 0; srandom(Seed); // Generate random numbers w and b // w = random_distribution(d*r)/sigma // b = rand(1,r)*2*pi double *w = (double *)malloc(d*r*sizeof(double)); double *b = (double *)malloc(r*sizeof(double)); switch (mKernel) { case Gaussian: StandardNormal(w, d*r); break; case Laplace: for (i = 0; i < r; i++) { MultivariateStudentT1(w+i*d, d); } break; case ProdLaplace: StudentT1(w, d*r); break; } #pragma omp parallel for private(i) for (i = 0; i < d*r; i++) { w[i] /= sigma; } // make sure w is the same as other codes /*printf("w 
= \n"); for (i = 0;i < r; i++){ for (j = 0;j< d; j++){ printf(" %g", w[d*j+i]); } printf("\n"); }*/ UniformRandom01(b, r); #pragma omp parallel for private(i) for (i = 0; i < r; i++) { b[i] *= TWO_PI; } // make sure b is the same as other codes /*printf("b = \n"); for (i = 0;i < r; i++){ printf(" %g\n", b[i]); }*/ printf("Generate fourier feature: finish generating w and b\n"); // Generate feature matrix Z given X. Size n*r double *Ztrain = (double *)malloc(n*r*sizeof(double)); ComputeFeatureMatrix(n, d, r, Xtrain, w, b, Ztrain, sigma); double *Ztest = (double *)malloc(m*r*sizeof(double)); ComputeFeatureMatrix(m, d, r, Xtest, w, b, Ztest, sigma); printf("Generate fourier feature: finish computing feature matrix\n"); // Write fourier feature matrix in LibSVM format if (WriteLibSVM(trainOut, Ztrain, ytrain, r, n) == -1){ return -1; } if (WriteLibSVM(testOut, Ztest, ytest, r, m) == -1){ return -1; } printf("Generate fourier feature: finish storing data\n"); free(Xtrain); free(ytrain); free(Xtest); free(ytest); free(w); free(b); free(Ztrain); free(Ztest); return 0; }
GB_unaryop__lnot_int16_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__lnot_int16_fp64
// op(A') function: GB_tran__lnot_int16_fp64

// C type: int16_t
// A type: double
// cast: int16_t cij ; GB_CAST_SIGNED(cij,aij,16)
// unaryop: cij = !(aij != 0)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, aij) \
    int16_t z ; GB_CAST_SIGNED(z,aij,16) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT16 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = !(aij != 0) entrywise to the anz entries of Ax, casting the
// double input to int16_t first, parallelized over nthreads with OpenMP.
// Aliasing Cx == Ax is safe: each iteration reads Ax[p] (GB_GETA) before it
// writes Cx[p] (GB_OP), and iterations touch distinct indices.

GrB_Info GB_unop__lnot_int16_fp64
(
    int16_t *Cx, // Cx and Ax may be aliased
    double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // this kernel was compiled out; caller falls back to the generic path
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body is supplied by the shared template GB_unaryop_transpose.c,
// specialized through the macros defined above (GB_GETA/GB_CASTING/GB_OP).

GrB_Info GB_tran__lnot_int16_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__first_int8.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd): GB (_AaddB__first_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__first_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__first_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__first_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__first_int8)
// A*D function (colscale): GB (_AxD__first_int8)
// D*A function (rowscale): GB (_DxB__first_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__first_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__first_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_int8)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))

// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 1

// BinaryOp: cij = aij

// This is the FIRST operator: z = x, so B's values are never read (its
// pattern alone matters) -- hence GB_B_IS_PATTERN is 1 and GB_GETB is empty.

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// true if values of B are not used
#define GB_B_IS_PATTERN \
    1 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = x ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FIRST || GxB_NO_INT8 || GxB_NO_FIRST_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__first_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__first_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__first_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__first_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) 
; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__first_int8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__first_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int8_t alpha_scalar ; int8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int8_t *) alpha_scalar_in)) ; beta_scalar = (*((int8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__first_int8) ( GrB_Matrix C, const int C_sparsity, 
const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__first_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__first_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__first_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; 
p < bnz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = x ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = aij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = x ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = aij ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
RunOvlayBorders.c
/*****************************************************************************\
* RunOvlayBorders.c
*
* AUTHOR  : Felipe Belem
* DATE    : 2021-02-28
* LICENSE : MIT License
* EMAIL   : felipe.belem@ic.unicamp.br
\*****************************************************************************/
#include "ift.h"
#include "iftArgs.h"

/* Prints the command-line help text to stdout. */
void usage();
/* Builds a new color image: orig_img with the borders between distinct
   labels of label_img painted in the given normalized RGB color, using a
   circular brush of radius `thick`. Caller owns the returned image. */
iftImage *ovlayBorders
(const iftImage *orig_img, const iftImage *label_img, const float thick,
 const iftFColor rgb);
/* Reads the required --img/--labels/--out arguments; allocates *img and
   *labels (caller frees), and points *path at the argument string. */
void readImgInputs
(const iftArgs *args, iftImage **img, iftImage **labels, const char **path);
/* Reads the optional --thick/--rgb arguments, filling in defaults
   (thick = 1.0, rgb = black) when absent. */
void readOptArgs
(const iftArgs *args, float *thick, iftFColor *rgb);
/* NOTE(review): declared but no definition or call site appears in this
   file — presumably a leftover; confirm before removing. */
void writeOvlayImage
(const iftImage *ovlay_img, const char *path);

int main(int argc, char const *argv[])
{
  //--- Argument validation --------------------------------------------------
  bool has_req, has_help;
  iftArgs *args;

  args = iftCreateArgs(argc, argv);

  // All three of --img, --labels and --out are mandatory
  has_req = iftExistArg(args, "img") && iftExistArg(args, "labels") &&
            iftExistArg(args, "out");
  has_help = iftExistArg(args, "help");

  if(has_req == false || has_help == true)
  {
    usage();
    iftDestroyArgs(&args);
    return EXIT_FAILURE;
  }
  //--- Main pipeline: read -> overlay -> write ------------------------------
  const char *OVLAY_PATH;
  float thick;
  iftImage *img, *label_img, *ovlay_img;
  iftFColor rgb;

  readImgInputs(args, &img, &label_img, &OVLAY_PATH);
  readOptArgs(args, &thick, &rgb);
  iftDestroyArgs(&args);

  ovlay_img = ovlayBorders(img, label_img, thick, rgb);
  iftDestroyImage(&img);
  iftDestroyImage(&label_img);

  iftWriteImageByExt(ovlay_img, OVLAY_PATH);
  iftDestroyImage(&ovlay_img);

  return EXIT_SUCCESS;
}

void usage()
{
  const int SKIP_IND = 15; // For indentation purposes

  printf("\nThe required parameters are:\n");
  printf("%-*s %s\n", SKIP_IND, "--img",
         "Input image");
  printf("%-*s %s\n", SKIP_IND, "--labels",
         "Input label");
  printf("%-*s %s\n", SKIP_IND, "--out",
         "Output border overlayed image");

  printf("\nThe optional parameters are:\n");
  printf("%-*s %s\n", SKIP_IND, "--rgb",
         "Comma-separated normalized RGB border color. Default: 0,0,0");
  printf("%-*s %s\n", SKIP_IND, "--thick",
         "Border thickness. Default: 1.0");
  printf("%-*s %s\n", SKIP_IND, "--help",
         "Prints this message");

  printf("\n");
}

iftImage *ovlayBorders
(const iftImage *orig_img, const iftImage *label_img, const float thick,
 const iftFColor rgb)
{
  #if IFT_DEBUG //------------------------------------------------------------
  assert(orig_img != NULL);
  assert(label_img != NULL);
  iftVerifyImageDomains(orig_img, label_img, "ovlayBorders");
  assert(thick > 0);
  #endif //-------------------------------------------------------------------
  int depth, norm_val;
  iftImage *ovlay_img;
  iftAdjRel *A;
  iftColor RGB, YCbCr;

  // Circular adjacency of radius `thick` controls how wide painted borders
  // appear (assumes iftCircular builds a disc neighborhood — IFT library)
  A = iftCircular(thick);

  depth = iftImageDepth(orig_img);
  norm_val = iftMaxImageRange(depth); // max value for this bit depth

  ovlay_img = iftCreateColorImage(orig_img->xsize, orig_img->ysize,
                                  orig_img->zsize, depth);

  // Scale the normalized [0,1] color to the image range, then convert to the
  // YCbCr space used by iftImage color planes
  RGB.val[0] = rgb.val[0] * norm_val;
  RGB.val[1] = rgb.val[1] * norm_val;
  RGB.val[2] = rgb.val[2] * norm_val;
  YCbCr = iftRGBtoYCbCr(RGB, norm_val);

  #if IFT_OMP //--------------------------------------------------------------
  #pragma omp parallel for
  #endif //-------------------------------------------------------------------
  for(int p = 0; p < ovlay_img->n; ++p)
  {
    bool is_border;
    int i;
    iftVoxel p_vxl;

    // p is a border voxel if any neighbor (within radius `thick`) carries a
    // different label
    is_border = false;
    p_vxl = iftGetVoxelCoord(ovlay_img, p);

    i = 0;
    while(is_border == false && i < A->n) // stop at the first differing label
    {
      iftVoxel adj_vxl;

      adj_vxl = iftGetAdjacentVoxel(A, p_vxl, i);

      if(iftValidVoxel(ovlay_img, adj_vxl) == true) // ignore out-of-bounds
      {
        int adj_idx;

        adj_idx = iftGetVoxelIndex(ovlay_img, adj_vxl);

        if(label_img->val[p] != label_img->val[adj_idx]) is_border = true;
      }

      ++i;
    }

    if(is_border == true)
    {
      // Paint the border in the requested color
      ovlay_img->val[p] = YCbCr.val[0];
      ovlay_img->Cb[p] = YCbCr.val[1];
      ovlay_img->Cr[p] = YCbCr.val[2];
    }
    else
    {
      // Copy the original voxel; grayscale inputs keep the Cb/Cr planes as
      // created by iftCreateColorImage
      ovlay_img->val[p] = orig_img->val[p];
      if(iftIsColorImage(orig_img) == true)
      {
        ovlay_img->Cb[p] = orig_img->Cb[p];
        ovlay_img->Cr[p] = orig_img->Cr[p];
      }
    }
  }
  iftDestroyAdjRel(&A);

  // Normalize output to 8-bit for conventional image formats
  if(depth != 8) iftConvertNewBitDepth(&ovlay_img, 8);
return ovlay_img; } void readImgInputs (const iftArgs *args, iftImage **img, iftImage **labels, const char **path) { #if IFT_DEBUG //-----------------------------------------------------------// assert(args != NULL); assert(img != NULL); assert(labels != NULL); assert(path != NULL); #endif //------------------------------------------------------------------// const char *PATH; if(iftHasArgVal(args, "img") == true) { PATH = iftGetArg(args, "img"); (*img) = iftReadImageByExt(PATH); } else iftError("No image path was given", "readImgInputs"); if(iftHasArgVal(args, "labels") == true) { PATH = iftGetArg(args, "labels"); (*labels) = iftReadImageByExt(PATH); } else iftError("No label image path was given", "readImgInputs"); iftVerifyImageDomains(*img, *labels, "readImgInputs"); if(iftHasArgVal(args, "out") == true) (*path) = iftGetArg(args, "out"); else iftError("No output image path was given", "readImgInputs"); } void readOptArgs (const iftArgs *args, float *thick, iftFColor *rgb) { #if IFT_DEBUG //-----------------------------------------------------------// assert(args != NULL); assert(thick != NULL); assert(rgb != NULL); #endif //------------------------------------------------------------------// if(iftExistArg(args, "thick") == true) { if(iftHasArgVal(args, "thick") == true) (*thick) = atoi(iftGetArg(args, "thick")); else iftError("No border thickness was given", "readOptArgs"); } else (*thick) = 1.0; if(iftExistArg(args, "rgb") == true) { if(iftHasArgVal(args, "rgb") == true) { const char *VAL; int i; char *tmp, *tok; VAL = iftGetArg(args, "rgb"); tmp = iftCopyString(VAL); tok = strtok(tmp, ","); i = 0; while(tok != NULL && i < 3) { float c; c = atof(tok); if(c >= 0 && c <= 1) (*rgb).val[i] = c; else iftError("The color should be within [0,1]", "readOptArgs"); tok = strtok(NULL, ","); i++; } if((tok != NULL && i == 3) || (tok == NULL && i < 2)) iftError("Three colors are required for the RGB", "readOptArgs"); free(tmp); } else iftError("No normalized RGB color was 
given", "readOptArgs"); } else (*rgb).val[0] = (*rgb).val[1] = (*rgb).val[2] = 0.0; }
test.c
#include <stdio.h> #include <omp.h> #include "../utilities/check.h" #include "../utilities/utilities.h" #define HOST_MAX_TEAMS 128 #define TRIALS (1) #define N (992) #define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;}) #define ZERO(X) ZERO_ARRAY(N, X) int main(void) { check_offloading(); double A[N], B[N], C[N], D[N], E[N]; double * pA = malloc(N*sizeof(double)); int fail = 0; INIT(); // // Test: if clause // ZERO(A); int num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 512; // the number of teams started is implementation dependent int actual_teams = -1; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams if(0) map(tofrom:actual_teams) { if(omp_get_team_num() == 0) actual_teams = omp_get_num_teams(); A[omp_get_team_num()] += omp_get_team_num(); } } for (int i = 0 ; i < actual_teams ; i++) if (A[i] != i*TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: device clause // ZERO(A); num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 512; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams device(0) map(tofrom:actual_teams) { if(omp_get_team_num() == 0) actual_teams = omp_get_num_teams(); A[omp_get_team_num()] += omp_get_team_num(); } } for (int i = 0 ; i < actual_teams ; i++) if (A[i] != i*TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: map clause // ZERO(pA); num_teams = omp_is_initial_device() ? 
HOST_MAX_TEAMS : 512; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams map(pA[:N]) map(tofrom:actual_teams) { if(omp_get_team_num() == 0) actual_teams = omp_get_num_teams(); pA[omp_get_team_num()] += omp_get_team_num(); } } for (int i = 0 ; i < actual_teams ; i++) if (pA[i] != i*TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, pA[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: num_teams and omp_get_team_num() // ZERO(A); num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 512; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams num_teams(num_teams) { A[omp_get_team_num()] += omp_get_team_num(); } } for (int i = 0 ; i < num_teams ; i++) if (A[i] != i*TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: thread_limit and omp_get_thread_num() // ZERO(A); fail = 0; int num_threads = omp_is_initial_device() ? HOST_MAX_TEAMS : 256; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams num_teams(1) thread_limit(num_threads) #pragma omp parallel { int tid = omp_get_thread_num(); A[tid] += (double) tid; } } for (int i = 0 ; i < num_threads ; i++) if (A[i] != i*TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: if statement in teams region // ZERO(A); fail = 0; num_teams = omp_is_initial_device() ? 
HOST_MAX_TEAMS : 512; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams num_teams(num_teams) { if (omp_get_team_num() % 2 == 0) { int teid = omp_get_team_num(); A[teid] += (double) 1; } else { int teid = omp_get_team_num(); A[teid] += (double) 2; } } } for (int i = 0 ; i < num_teams ; i++) { if (i % 2 == 0) { if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } } else if (A[i] != 2*TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) 2*TRIALS, A[i]); fail = 1; } } if(fail) printf("Failed\n"); else printf("Succeeded\n"); /* // */ /* // Test: num_teams and thread_limit by simulating a distribute pragma */ /* // */ /* ZERO(A); */ /* fail = 0; */ /* for (int t = 0 ; t < TRIALS ; t++) { */ /* #pragma omp target teams num_teams(2) thread_limit(496) */ /* { */ /* if (omp_get_team_num() == 0) { */ /* #pragma omp parallel */ /* { */ /* A[omp_get_team_num()*496+omp_get_thread_num()] += omp_get_thread_num(); */ /* if(omp_get_thread_num() == 498) printf("teid = %d, tid = %d, accessing %d\n", omp_get_team_num(), omp_get_thread_num(), omp_get_team_num()*496+omp_get_thread_num()); */ /* } */ /* } else { */ /* #pragma omp parallel */ /* { */ /* if(omp_get_thread_num() == 0) */ /* printf("teid = %d, tid = %d: A= %lf\n", omp_get_team_num(), omp_get_thread_num(), A[omp_get_team_num()*496+omp_get_thread_num()]); */ /* A[omp_get_team_num()*496+omp_get_thread_num()] -= omp_get_thread_num(); */ /* if(omp_get_thread_num() == 0) */ /* printf("teid = %d, tid = %d: A= %lf\n", omp_get_team_num(), omp_get_thread_num(), A[omp_get_team_num()*496+omp_get_thread_num()]); */ /* } */ /* } */ /* } */ /* } */ /* for (int i = 0 ; i < 992 ; i++) { */ /* if (i < 496) { */ /* if (A[i] != i*TRIALS) { */ /* printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]); */ /* fail = 1; */ /* } */ /* } else if(i >= 496) */ /* if (A[i] != -((i-496)*TRIALS)) { */ /* printf("Error at %d, h = %lf, d = %lf\n", i, (double) 
-((i-496)*TRIALS), A[i]); */ /* fail = 1; */ /* } */ /* } */ /* if(fail) printf("Failed\n"); */ /* else printf("Succeeded\n"); */ // // Test: private // ZERO(A); fail = 0; int a = 10; num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 256; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams num_teams(num_teams) private(a) { a = omp_get_team_num(); A[omp_get_team_num()] += a; } } for (int i = 0 ; i < num_teams ; i++) if (A[i] != i*TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) i*TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: firstprivate // ZERO(A); fail = 0; a = 10; num_teams = omp_is_initial_device() ? HOST_MAX_TEAMS : 256; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target teams num_teams(num_teams) firstprivate(a) { a += omp_get_team_num(); A[omp_get_team_num()] += a; } } for (int i = 0 ; i < num_teams ; i++) if (A[i] != 10+i*TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) (10+i*TRIALS), A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); return 0; }
distort.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT % % D D I SS T O O R R T % % D D I SSS T O O RRRR T % % D D I SS T O O R R T % % DDDD IIIII SSSSS T OOO R R T % % % % % % MagickCore Image Distortion Methods % % % % Software Design % % Cristy % % Anthony Thyssen % % June 2007 % % % % % % Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/distort.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/hashmap.h" #include "MagickCore/image.h" #include "MagickCore/list.h" #include "MagickCore/matrix.h" #include "MagickCore/matrix-private.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/registry.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/shear.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" /* Numerous internal routines for image distortions. */ static inline double MagickMin(const double x,const double y) { return( x < y ? x : y); } static inline double MagickMax(const double x,const double y) { return( x > y ? 
x : y); }

/* In-place reorder of user-facing affine arguments (sx,ry,rx,sy,tx,ty)
   into the internal coefficient order (c0,c2,c4,c1,c3,c5). */
static inline void AffineArgsToCoefficients(double *affine)
{
  /* map external sx,ry,rx,sy,tx,ty to internal c0,c2,c4,c1,c3,c5 */
  double tmp[4];
  /* note indexes 0 and 5 remain unchanged */
  tmp[0]=affine[1]; tmp[1]=affine[2]; tmp[2]=affine[3]; tmp[3]=affine[4];
  affine[3]=tmp[0]; affine[1]=tmp[1]; affine[4]=tmp[2]; affine[2]=tmp[3];
}

/* Inverse of AffineArgsToCoefficients(): in-place reorder of internal
   coefficients back into the user-facing sx,ry,rx,sy,tx,ty argument order. */
static inline void CoefficientsToAffineArgs(double *coeff)
{
  /* map internal c0,c1,c2,c3,c4,c5 to external sx,ry,rx,sy,tx,ty */
  double tmp[4];
  /* note indexes 0 and 5 remain unchanged */
  tmp[0]=coeff[3]; tmp[1]=coeff[1]; tmp[2]=coeff[4]; tmp[3]=coeff[2];
  coeff[1]=tmp[0]; coeff[2]=tmp[1]; coeff[3]=tmp[2]; coeff[4]=tmp[3];
}

/* Computes the inverse of a 6-coefficient affine mapping into inverse[0..5].
   PerceptibleReciprocal() guards against a zero determinant (degenerate
   transform) — the result is then a very large, not infinite, scaling. */
static void InvertAffineCoefficients(const double *coeff,double *inverse)
{
  /* From "Digital Image Warping" by George Wolberg, page 50 */
  double determinant;

  determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[1]*coeff[3]);
  inverse[0]=determinant*coeff[4];
  inverse[1]=determinant*(-coeff[1]);
  inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[2]*coeff[4]);
  inverse[3]=determinant*(-coeff[3]);
  inverse[4]=determinant*coeff[0];
  inverse[5]=determinant*(coeff[2]*coeff[3]-coeff[0]*coeff[5]);
}

/* Computes the inverse of an 8-coefficient perspective (projective) mapping
   into inverse[0..7]; same degenerate-determinant guard as above. */
static void InvertPerspectiveCoefficients(const double *coeff,
  double *inverse)
{
  /* From "Digital Image Warping" by George Wolberg, page 53 */
  double determinant;

  determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[3]*coeff[1]);
  inverse[0]=determinant*(coeff[4]-coeff[7]*coeff[5]);
  inverse[1]=determinant*(coeff[7]*coeff[2]-coeff[1]);
  inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[4]*coeff[2]);
  inverse[3]=determinant*(coeff[6]*coeff[5]-coeff[3]);
  inverse[4]=determinant*(coeff[0]-coeff[6]*coeff[2]);
  inverse[5]=determinant*(coeff[3]*coeff[2]-coeff[0]*coeff[5]);
  inverse[6]=determinant*(coeff[3]*coeff[7]-coeff[6]*coeff[4]);
  inverse[7]=determinant*(coeff[6]*coeff[1]-coeff[0]*coeff[7]);
}

/*
 * Polynomial Term Defining Functions
 *
 * Order must either be an integer, or 1.5 to produce
 * the 2 dimensional polynomial
function... * affine 1 (3) u = c0 + c1*x + c2*y * bilinear 1.5 (4) u = '' + c3*x*y * quadratic 2 (6) u = '' + c4*x*x + c5*y*y * cubic 3 (10) u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3 * quartic 4 (15) u = '' + c10*x^4 + ... + c14*y^4 * quintic 5 (21) u = '' + c15*x^5 + ... + c20*y^5 * number in parenthesis minimum number of points needed. * Anything beyond quintic, has not been implemented until * a more automated way of determining terms is found. * Note the slight re-ordering of the terms for a quadratic polynomial * which is to allow the use of a bi-linear (order=1.5) polynomial. * All the later polynomials are ordered simply from x^N to y^N */ static size_t poly_number_terms(double order) { /* Return the number of terms for a 2d polynomial */ if ( order < 1 || order > 5 || ( order != floor(order) && (order-1.5) > MagickEpsilon) ) return 0; /* invalid polynomial order */ return((size_t) floor((order+1)*(order+2)/2)); } static double poly_basis_fn(ssize_t n, double x, double y) { /* Return the result for this polynomial term */ switch(n) { case 0: return( 1.0 ); /* constant */ case 1: return( x ); case 2: return( y ); /* affine order = 1 terms = 3 */ case 3: return( x*y ); /* bilinear order = 1.5 terms = 4 */ case 4: return( x*x ); case 5: return( y*y ); /* quadratic order = 2 terms = 6 */ case 6: return( x*x*x ); case 7: return( x*x*y ); case 8: return( x*y*y ); case 9: return( y*y*y ); /* cubic order = 3 terms = 10 */ case 10: return( x*x*x*x ); case 11: return( x*x*x*y ); case 12: return( x*x*y*y ); case 13: return( x*y*y*y ); case 14: return( y*y*y*y ); /* quartic order = 4 terms = 15 */ case 15: return( x*x*x*x*x ); case 16: return( x*x*x*x*y ); case 17: return( x*x*x*y*y ); case 18: return( x*x*y*y*y ); case 19: return( x*y*y*y*y ); case 20: return( y*y*y*y*y ); /* quintic order = 5 terms = 21 */ } return( 0 ); /* should never happen */ } static const char *poly_basis_str(ssize_t n) { /* return the result for this polynomial term */ switch(n) { case 0: 
return(""); /* constant */ case 1: return("*ii"); case 2: return("*jj"); /* affine order = 1 terms = 3 */ case 3: return("*ii*jj"); /* bilinear order = 1.5 terms = 4 */ case 4: return("*ii*ii"); case 5: return("*jj*jj"); /* quadratic order = 2 terms = 6 */ case 6: return("*ii*ii*ii"); case 7: return("*ii*ii*jj"); case 8: return("*ii*jj*jj"); case 9: return("*jj*jj*jj"); /* cubic order = 3 terms = 10 */ case 10: return("*ii*ii*ii*ii"); case 11: return("*ii*ii*ii*jj"); case 12: return("*ii*ii*jj*jj"); case 13: return("*ii*jj*jj*jj"); case 14: return("*jj*jj*jj*jj"); /* quartic order = 4 terms = 15 */ case 15: return("*ii*ii*ii*ii*ii"); case 16: return("*ii*ii*ii*ii*jj"); case 17: return("*ii*ii*ii*jj*jj"); case 18: return("*ii*ii*jj*jj*jj"); case 19: return("*ii*jj*jj*jj*jj"); case 20: return("*jj*jj*jj*jj*jj"); /* quintic order = 5 terms = 21 */ } return( "UNKNOWN" ); /* should never happen */ } static double poly_basis_dx(ssize_t n, double x, double y) { /* polynomial term for x derivative */ switch(n) { case 0: return( 0.0 ); /* constant */ case 1: return( 1.0 ); case 2: return( 0.0 ); /* affine order = 1 terms = 3 */ case 3: return( y ); /* bilinear order = 1.5 terms = 4 */ case 4: return( x ); case 5: return( 0.0 ); /* quadratic order = 2 terms = 6 */ case 6: return( x*x ); case 7: return( x*y ); case 8: return( y*y ); case 9: return( 0.0 ); /* cubic order = 3 terms = 10 */ case 10: return( x*x*x ); case 11: return( x*x*y ); case 12: return( x*y*y ); case 13: return( y*y*y ); case 14: return( 0.0 ); /* quartic order = 4 terms = 15 */ case 15: return( x*x*x*x ); case 16: return( x*x*x*y ); case 17: return( x*x*y*y ); case 18: return( x*y*y*y ); case 19: return( y*y*y*y ); case 20: return( 0.0 ); /* quintic order = 5 terms = 21 */ } return( 0.0 ); /* should never happen */ } static double poly_basis_dy(ssize_t n, double x, double y) { /* polynomial term for y derivative */ switch(n) { case 0: return( 0.0 ); /* constant */ case 1: return( 0.0 ); case 2: return( 1.0 
);                            /* affine      order = 1   terms = 3 */
    case 3:   return( x );    /* bilinear    order = 1.5 terms = 4 */
    case 4:   return( 0.0 );
    case 5:   return( y );    /* quadratic   order = 2   terms = 6 */
    /* every higher term is the x-derivative of the previous term */
    default:  return( poly_basis_dx(n-1,x,y) ); /* weird but true */
  }
  /* NOTE: the only reason that last is not true for 'quadratic' is due to
     the re-arrangement of terms to allow for 'bilinear' */
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A f f i n e T r a n s f o r m I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AffineTransformImage() transforms an image as dictated by the affine
%  matrix.  It allocates the memory necessary for the new Image structure and
%  returns a pointer to the new image.
%
%  The format of the AffineTransformImage method is:
%
%      Image *AffineTransformImage(const Image *image,
%        AffineMatrix *affine_matrix,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o affine_matrix: the affine matrix.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AffineTransformImage(const Image *image,
  const AffineMatrix *affine_matrix,ExceptionInfo *exception)
{
  double
    distort[6];

  Image
    *deskew_image;

  /*
    Affine transform image: delegate to the general DistortImage()
    operator using the AffineProjection method.
    NOTE(review): 'image' is dereferenced in the first assert without a
    prior image != NULL assert -- confirm whether one belongs here.
  */
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(affine_matrix != (AffineMatrix *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* pack the matrix into the 6-value argument order DistortImage() expects:
     sx, rx, ry, sy, tx, ty */
  distort[0]=affine_matrix->sx;
  distort[1]=affine_matrix->rx;
  distort[2]=affine_matrix->ry;
  distort[3]=affine_matrix->sy;
  distort[4]=affine_matrix->tx;
  distort[5]=affine_matrix->ty;
  deskew_image=DistortImage(image,AffineProjectionDistortion,6,distort,
    MagickTrue,exception);
  return(deskew_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     G e n e r a t e C o e f f i c i e n t s                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GenerateCoefficients() takes user provided input arguments and generates
%  the coefficients, needed to apply the specific distortion for either
%  distorting images (generally using control points) or generating a color
%  gradient from sparsely separated color points.
%
%  The format of the GenerateCoefficients() method is:
%
%      double *GenerateCoefficients(const Image *image,
%        DistortImageMethod *method,const size_t number_arguments,
%        const double *arguments,size_t number_values,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to be distorted.
%
%    o method: the method of image distortion/ sparse gradient
%
%    o number_arguments: the number of arguments given.
%
%    o arguments: the arguments for this distortion method.
%
%    o number_values: the style and format of given control points, (caller type)
%         0: 2 dimensional mapping of control points (Distort)
%            Format:  u,v,x,y  where u,v is the 'source' of the
%            the color to be plotted, for DistortImage()
%         N: Interpolation of control points with N values (usually r,g,b)
%            Format:  x,y,r,g,b  mapping x,y to color values r,g,b
%            IN future, variable number of values may be given (1 to N)
%
%    o exception: return any errors or warnings in this structure
%
%  Note that the returned array of double values must be freed by the
%  calling method using RelinquishMagickMemory().  This however may change in
%  the future to require a more 'method' specific method.
%
%  Because of this this method should not be classed as stable or used
%  outside other MagickCore library methods.
%
*/
/* Round the fraction to nearest integer (a tie rounds upward). */
static inline double MagickRound(double x)
{
  if ((x-floor(x)) < (ceil(x)-x))
    return(floor(x));
  return(ceil(x));
}

static double *GenerateCoefficients(const Image *image,
  DistortImageMethod *method,const size_t number_arguments,
  const double *arguments,size_t number_values,ExceptionInfo *exception)
{
  double
    *coeff;

  register size_t
    i;

  size_t
    number_coeff, /* number of coefficients to return (array size) */
    cp_size,      /* number floating point numbers per control point */
    cp_x,cp_y,    /* the x,y indexes for control point */
    cp_values;    /* index of values for this control point */
    /* number_values  Number of values given per control point */

  /* number_values == 0 marks the "image distortion" caller style;
     otherwise it is the count of values per sparse-gradient point. */
  if ( number_values == 0 ) {
    /* Image distortion using control points (or other distortion)
       That is generate a mapping so that   x,y->u,v   given  u,v,x,y
    */
    number_values = 2;   /* special case: two values of u,v */
    cp_values = 0;       /* the values i,j are BEFORE the destination CP x,y */
    cp_x = 2;            /* location of x,y in input control values */
    cp_y = 3;
    /* NOTE: cp_values, also used for later 'reverse map distort' tests */
  }
  else {
    cp_x = 0;            /* location of x,y in input control values */
    cp_y = 1;
    cp_values = 2;       /* and
the other values are after x,y */ /* Typically in this case the values are R,G,B color values */ } cp_size = number_values+2; /* each CP defintion involves this many numbers */ /* If not enough control point pairs are found for specific distortions fall back to Affine distortion (allowing 0 to 3 point pairs) */ if ( number_arguments < 4*cp_size && ( *method == BilinearForwardDistortion || *method == BilinearReverseDistortion || *method == PerspectiveDistortion ) ) *method = AffineDistortion; number_coeff=0; switch (*method) { case AffineDistortion: /* also BarycentricColorInterpolate: */ number_coeff=3*number_values; break; case PolynomialDistortion: /* number of coefficents depend on the given polynomal 'order' */ if ( number_arguments <= 1 && (number_arguments-1)%cp_size != 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","Polynomial", "Invalid number of args: order [CPs]..."); return((double *) NULL); } i = poly_number_terms(arguments[0]); number_coeff = 2 + i*number_values; if ( i == 0 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","Polynomial", "Invalid order, should be interger 1 to 5, or 1.5"); return((double *) NULL); } if ( number_arguments < 1+i*cp_size ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", "Polynomial", (double) i); return((double *) NULL); } break; case BilinearReverseDistortion: number_coeff=4*number_values; break; /* The rest are constants as they are only used for image distorts */ case BilinearForwardDistortion: number_coeff=10; /* 2*4 coeff plus 2 constants */ cp_x = 0; /* Reverse src/dest coords for forward mapping */ cp_y = 1; cp_values = 2; break; #if 0 case QuadraterialDistortion: number_coeff=19; /* BilinearForward + BilinearReverse */ #endif break; case ShepardsDistortion: number_coeff=1; /* The power factor to use */ break; case ArcDistortion: 
      number_coeff=5;
      break;
    case ScaleRotateTranslateDistortion:
    case AffineProjectionDistortion:
    case Plane2CylinderDistortion:
    case Cylinder2PlaneDistortion:
      number_coeff=6;
      break;
    case PolarDistortion:
    case DePolarDistortion:
      number_coeff=8;
      break;
    case PerspectiveDistortion:
    case PerspectiveProjectionDistortion:
      number_coeff=9;
      break;
    case BarrelDistortion:
    case BarrelInverseDistortion:
      number_coeff=10;
      break;
    default:
      perror("unknown method given"); /* just fail assertion */
  }

  /* allocate the array of coefficients needed */
  coeff = (double *) AcquireQuantumMemory(number_coeff,sizeof(*coeff));
  if (coeff == (double *) NULL) {
    (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed",
                  "%s", "GenerateCoefficients");
    return((double *) NULL);
  }
  /* zero out coefficients array */
  for (i=0; i < number_coeff; i++)
    coeff[i] = 0.0;

  switch (*method)
  {
    case AffineDistortion:
    {
      /* Affine Distortion
            v =  c0*x + c1*y + c2
         for each 'value' given

         Input Arguments are sets of control points...
         For Distort Images    u,v, x,y  ...
         For Sparse Gradients  x,y, r,g,b  ...
      */
      if ( number_arguments%cp_size != 0 ||
           number_arguments < cp_size ) {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument", "%s : 'require at least %.20g CPs'",
              "Affine", 1.0);
        coeff=(double *) RelinquishMagickMemory(coeff);
        return((double *) NULL);
      }
      /* handle special cases of not enough arguments */
      if ( number_arguments == cp_size ) { /* Only 1 CP Set Given */
        if ( cp_values == 0 ) {
          /* image distortion - translate the image */
          coeff[0] = 1.0;
          coeff[2] = arguments[0] - arguments[2];
          coeff[4] = 1.0;
          coeff[5] = arguments[1] - arguments[3];
        }
        else {
          /* sparse gradient - use the values directly */
          for (i=0; i<number_values; i++)
            coeff[i*3+2] = arguments[cp_values+i];
        }
      }
      else {
        /* 2 or more points (usually 3) given.
           Solve a least squares simultaneous equation for coefficients.
        */
        double **matrix, **vectors, terms[3];
        MagickBooleanType status;

        /* create matrix, and a fake vectors matrix */
        matrix = AcquireMagickMatrix(3UL,3UL);
        vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
        if (matrix == (double **) NULL || vectors == (double **) NULL)
          {
            matrix  = RelinquishMagickMatrix(matrix, 3UL);
            vectors = (double **) RelinquishMagickMemory(vectors);
            coeff   = (double *) RelinquishMagickMemory(coeff);
            (void) ThrowMagickException(exception,GetMagickModule(),
                    ResourceLimitError,"MemoryAllocationFailed",
                    "%s", "DistortCoefficients");
            return((double *) NULL);
          }
        /* fake a number_values x3 vectors matrix from coefficients array */
        for (i=0; i < number_values; i++)
          vectors[i] = &(coeff[i*3]);
        /* Add given control point pairs for least squares solving */
        for (i=0; i < number_arguments; i+=cp_size) {
          terms[0] = arguments[i+cp_x];  /* x */
          terms[1] = arguments[i+cp_y];  /* y */
          terms[2] = 1;                  /* 1 */
          LeastSquaresAddTerms(matrix,vectors,terms,
                   &(arguments[i+cp_values]),3UL,number_values);
        }
        if ( number_arguments == 2*cp_size ) {
          /* Only two pairs were given, but we need 3 to solve the affine.
             Fake extra coordinates by rotating p1 around p0 by 90 degrees.
               x2 = x0 - (y1-y0)
               y2 = y0 + (x1-x0)
          */
          terms[0] = arguments[cp_x]
               - ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */
          terms[1] = arguments[cp_y] +
               + ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */
          terms[2] = 1;                                         /* 1 */
          if ( cp_values == 0 ) {
            /* Image Distortion - rotate the u,v coordinates too */
            double uv2[2];
            uv2[0] = arguments[0] - arguments[5] + arguments[1]; /* u2 */
            uv2[1] = arguments[1] + arguments[4] - arguments[0]; /* v2 */
            LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL);
          }
          else {
            /* Sparse Gradient - use values of p0 for linear gradient */
            LeastSquaresAddTerms(matrix,vectors,terms,
                  &(arguments[cp_values]),3UL,number_values);
          }
        }
        /* Solve for LeastSquares Coefficients */
        status=GaussJordanElimination(matrix,vectors,3UL,number_values);
        matrix = RelinquishMagickMatrix(matrix, 3UL);
        vectors = (double **) RelinquishMagickMemory(vectors);
        if ( status == MagickFalse ) {
          coeff = (double *) RelinquishMagickMemory(coeff);
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
                "InvalidArgument","%s : 'Unsolvable Matrix'",
                CommandOptionToMnemonic(MagickDistortOptions, *method) );
          return((double *) NULL);
        }
      }
      return(coeff);
    }
    case AffineProjectionDistortion:
    {
      /* Arguments: Affine Matrix (forward mapping)
         Arguments  sx, rx, ry, sy, tx, ty
         Where      u = sx*x + ry*y + tx
                    v = rx*x + sy*y + ty

         Returns coefficients (in their inverse form) ordered as...
             sx ry tx
             rx sy ty

         AffineProjection Distortion Notes...
           + Will only work with a 2 number_values for Image Distortion
           + Can not be used for generating a sparse gradient (interpolation)
      */
      double inverse[8];
      if (number_arguments != 6) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument","%s : 'Needs 6 coeff values'",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      }
      /* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */
      for(i=0; i<6UL; i++ )
        inverse[i] = arguments[i];
      AffineArgsToCoefficients(inverse);       /* map into coefficients */
      InvertAffineCoefficients(inverse, coeff); /* invert */
      *method = AffineDistortion;
      return(coeff);
    }
    case ScaleRotateTranslateDistortion:
    {
      /* Scale, Rotate and Translate Distortion
         An alternative Affine Distortion
         Argument options, by number of arguments given:
            7: x,y, sx,sy, a, nx,ny
            6: x,y,   s,   a, nx,ny
            5: x,y, sx,sy, a
            4: x,y,   s,   a
            3: x,y,        a
            2:        s,   a
            1:             a
         Where actions are (in order of application)
            x,y     'center' of transforms     (default = image center)
            sx,sy   scale image by this amount (default = 1)
            a       angle of rotation          (argument required)
            nx,ny   move 'center' here         (default = x,y or no movement)
         And convert to affine mapping coefficients

         ScaleRotateTranslate Distortion Notes...
           + Does not use a set of CPs in any normal way
           + Will only work with a 2 number_valuesal Image Distortion
           + Cannot be used for generating a sparse gradient (interpolation)
      */
      double cosine, sine, x,y,sx,sy,a,nx,ny;

      /* set default center, and default scale */
      x = nx = (double)(image->columns)/2.0 + (double)image->page.x;
      y = ny = (double)(image->rows)/2.0 + (double)image->page.y;
      sx = sy = 1.0;
      switch ( number_arguments ) {
      case 0:
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument","%s : 'Needs at least 1 argument'",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      case 1:
        a = arguments[0];
        break;
      case 2:
        sx = sy = arguments[0];
        a = arguments[1];
        break;
      default:
        /* 3 or more arguments: leading pair is the transform center */
        x = nx = arguments[0];
        y = ny = arguments[1];
        switch ( number_arguments ) {
        case 3:
          a = arguments[2];
          break;
        case 4:
          sx = sy = arguments[2];
          a = arguments[3];
          break;
        case 5:
          sx = arguments[2];
          sy = arguments[3];
          a = arguments[4];
          break;
        case 6:
          sx = sy = arguments[2];
          a = arguments[3];
          nx = arguments[4];
          ny = arguments[5];
          break;
        case 7:
          sx = arguments[2];
          sy = arguments[3];
          a = arguments[4];
          nx = arguments[5];
          ny = arguments[6];
          break;
        default:
          coeff = (double *) RelinquishMagickMemory(coeff);
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
                "InvalidArgument","%s : 'Too Many Arguments (7 or less)'",
                CommandOptionToMnemonic(MagickDistortOptions, *method) );
          return((double *) NULL);
        }
        break;
      }
      /* Trap if sx or sy == 0 -- image is scaled out of existence!
      */
      if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument","%s : 'Zero Scale Given'",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      }
      /* Save the given arguments as an affine distortion */
      a=DegreesToRadians(a);
      cosine=cos(a);
      sine=sin(a);
      *method = AffineDistortion;
      coeff[0]=cosine/sx;
      coeff[1]=sine/sx;
      coeff[2]=x-nx*coeff[0]-ny*coeff[1];
      coeff[3]=(-sine)/sy;
      coeff[4]=cosine/sy;
      coeff[5]=y-nx*coeff[3]-ny*coeff[4];
      return(coeff);
    }
    case PerspectiveDistortion:
    {
      /* Perspective Distortion (a ratio of affine distortions)

             p(x,y)    c0*x + c1*y + c2
         u = ------ = ------------------
             r(x,y)    c6*x + c7*y + 1

             q(x,y)    c3*x + c4*y + c5
         v = ------ = ------------------
             r(x,y)    c6*x + c7*y + 1

         c8 = Sign of 'r', or the denominator affine, for the actual image.
              This determines what part of the distorted image is 'ground'
              side of the horizon, the other part is 'sky' or invalid.
              Valid values are  +1.0  or  -1.0  only.

         Input Arguments are sets of control points...
         For Distort Images    u,v, x,y  ...
         For Sparse Gradients  x,y, r,g,b  ...

         Perspective Distortion Notes...
           + Can be thought of as ratio of  3 affine transformations
           + Not separatable: r() or c6 and c7 are used by both equations
           + All 8 coefficients must be determined simultaneously
           + Will only work with a 2 number_valuesal Image Distortion
           + Can not be used for generating a sparse gradient (interpolation)
           + It is not linear, but is simple to generate an inverse
           + All lines within an image remain lines.
           + but distances between points may vary.
*/ double **matrix, *vectors[1], terms[8]; size_t cp_u = cp_values, cp_v = cp_values+1; MagickBooleanType status; if ( number_arguments%cp_size != 0 || number_arguments < cp_size*4 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* fake 1x8 vectors matrix directly using the coefficients array */ vectors[0] = &(coeff[0]); /* 8x8 least-squares matrix (zeroed) */ matrix = AcquireMagickMatrix(8UL,8UL); if (matrix == (double **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* Add control points for least squares solving */ for (i=0; i < number_arguments; i+=4) { terms[0]=arguments[i+cp_x]; /* c0*x */ terms[1]=arguments[i+cp_y]; /* c1*y */ terms[2]=1.0; /* c2*1 */ terms[3]=0.0; terms[4]=0.0; terms[5]=0.0; terms[6]=-terms[0]*arguments[i+cp_u]; /* 1/(c6*x) */ terms[7]=-terms[1]*arguments[i+cp_u]; /* 1/(c7*y) */ LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]), 8UL,1UL); terms[0]=0.0; terms[1]=0.0; terms[2]=0.0; terms[3]=arguments[i+cp_x]; /* c3*x */ terms[4]=arguments[i+cp_y]; /* c4*y */ terms[5]=1.0; /* c5*1 */ terms[6]=-terms[3]*arguments[i+cp_v]; /* 1/(c6*x) */ terms[7]=-terms[4]*arguments[i+cp_v]; /* 1/(c7*y) */ LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]), 8UL,1UL); } /* Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,8UL,1UL); matrix = RelinquishMagickMatrix(matrix, 8UL); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* Calculate 9'th 
coefficient! The ground-sky determination. What is sign of the 'ground' in r() denominator affine function? Just use any valid image coordinate (first control point) in destination for determination of what part of view is 'ground'. */ coeff[8] = coeff[6]*arguments[cp_x] + coeff[7]*arguments[cp_y] + 1.0; coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0; return(coeff); } case PerspectiveProjectionDistortion: { /* Arguments: Perspective Coefficents (forward mapping) */ if (number_arguments != 8) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'Needs 8 coefficient values'", CommandOptionToMnemonic(MagickDistortOptions, *method)); return((double *) NULL); } /* FUTURE: trap test c0*c4-c3*c1 == 0 (determinate = 0, no inverse) */ InvertPerspectiveCoefficients(arguments, coeff); /* Calculate 9'th coefficient! The ground-sky determination. What is sign of the 'ground' in r() denominator affine function? Just use any valid image cocodinate in destination for determination. For a forward mapped perspective the images 0,0 coord will map to c2,c5 in the distorted image, so set the sign of denominator of that. */ coeff[8] = coeff[6]*arguments[2] + coeff[7]*arguments[5] + 1.0; coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0; *method = PerspectiveDistortion; return(coeff); } case BilinearForwardDistortion: case BilinearReverseDistortion: { /* Bilinear Distortion (Forward mapping) v = c0*x + c1*y + c2*x*y + c3; for each 'value' given This is actually a simple polynomial Distortion! The difference however is when we need to reverse the above equation to generate a BilinearForwardDistortion (see below). Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... 
      */
      double **matrix, **vectors, terms[4];
      MagickBooleanType status;

      /* check the number of arguments */
      if ( number_arguments%cp_size != 0 ||
           number_arguments < cp_size*4 ) {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument", "%s : 'require at least %.20g CPs'",
              CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
        coeff=(double *) RelinquishMagickMemory(coeff);
        return((double *) NULL);
      }
      /* create matrix, and a fake vectors matrix */
      matrix = AcquireMagickMatrix(4UL,4UL);
      vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
      if (matrix == (double **) NULL || vectors == (double **) NULL)
        {
          matrix  = RelinquishMagickMatrix(matrix, 4UL);
          vectors = (double **) RelinquishMagickMemory(vectors);
          coeff   = (double *) RelinquishMagickMemory(coeff);
          (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed",
                  "%s", "DistortCoefficients");
          return((double *) NULL);
        }
      /* fake a number_values x4 vectors matrix from coefficients array */
      for (i=0; i < number_values; i++)
        vectors[i] = &(coeff[i*4]);
      /* Add given control point pairs for least squares solving */
      for (i=0; i < number_arguments; i+=cp_size) {
        terms[0] = arguments[i+cp_x];   /*  x  */
        terms[1] = arguments[i+cp_y];   /*  y  */
        terms[2] = terms[0]*terms[1];   /* x*y */
        terms[3] = 1;                   /*  1  */
        LeastSquaresAddTerms(matrix,vectors,terms,
                 &(arguments[i+cp_values]),4UL,number_values);
      }
      /* Solve for LeastSquares Coefficients */
      status=GaussJordanElimination(matrix,vectors,4UL,number_values);
      matrix = RelinquishMagickMatrix(matrix, 4UL);
      vectors = (double **) RelinquishMagickMemory(vectors);
      if ( status == MagickFalse ) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument","%s : 'Unsolvable Matrix'",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      }
      if ( *method == BilinearForwardDistortion ) {
         /* Bilinear Forward
            Mapped Distortion

            The above least-squares solved for coefficents but in the
            forward direction, due to changes to indexing constants.

               i = c0*x + c1*y + c2*x*y + c3;
               j = c4*x + c5*y + c6*x*y + c7;

            where i,j are in the destination image, NOT the source.

            Reverse Pixel mapping however needs to use reverse of these
            functions.  It required a full page of algebra to work out the
            reversed mapping formula, but resolves down to the following...

               c8 = c0*c5-c1*c4;
               c9 = 2*(c2*c5-c1*c6);   // '2*a' in the quadratic formula

               i = i - c3;   j = j - c7;
               b = c6*i - c2*j + c8;   // So that   a*y^2 + b*y + c == 0
               c = c4*i -  c0*j;       // y = ( -b +- sqrt(bb - 4ac) ) / (2*a)

               r = b*b - c9*(c+c);
               if ( c9 != 0 )
                 y = ( -b + sqrt(r) ) / c9;
               else
                 y = -c/b;

               x = ( i - c1*y) / ( c1 - c2*y );

            NB: if 'r' is negative there is no solution!
            NB: the sign of the sqrt() should be negative if image becomes
                flipped or flopped, or crosses over itself.
            NB: techniqually coefficient c5 is not needed, anymore, but kept
                for completeness.

            See Anthony Thyssen <A.Thyssen@griffith.edu.au>
            or  Fred Weinhaus  <fmw@alink.net>  for more details.
         */
         coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4];
         coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]);
      }
      return(coeff);
    }
#if 0
    case QuadrilateralDistortion:
    {
      /* Map a Quadrilateral to a unit square using BilinearReverse
         Then map that unit square back to the final Quadrilateral
         using BilinearForward.

         Input Arguments are sets of control points...
         For Distort Images    u,v, x,y  ...
         For Sparse Gradients  x,y, r,g,b  ...
      */
      /* UNDER CONSTRUCTION */
      return(coeff);
    }
#endif
    case PolynomialDistortion:
    {
      /* Polynomial Distortion

         First two coefficients are used to hold global polynomial
         information:
            c0 = Order of the polynomial being created
            c1 = number_of_terms in one polynomial equation

         Rest of the coefficients map to the equations....
            v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ...
         for each 'value' (number_values of them) given.
         As such total coefficients =  2 + number_terms * number_values

         Input Arguments are sets of control points...
         For Distort Images    order  [u,v, x,y] ...
         For Sparse Gradients  order  [x,y, r,g,b] ...

         Polynomial Distortion Notes...
           + UNDER DEVELOPMENT -- Do not expect this to remain as is.
           + Currently polynomial is a reversed mapped distortion.
           + Order 1.5 is fudged to map into a bilinear distortion.
             though it is not the same order as that distortion.
      */
      double **matrix, **vectors, *terms;
      size_t nterms;   /* number of polynomial terms per number_values */
      register ssize_t j;
      MagickBooleanType status;

      /* first two coefficients hold polynomial order information */
      coeff[0] = arguments[0];
      coeff[1] = (double) poly_number_terms(arguments[0]);
      nterms = (size_t) coeff[1];

      /* create matrix, a fake vectors matrix, and least sqs terms */
      matrix = AcquireMagickMatrix(nterms,nterms);
      vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
      terms = (double *) AcquireQuantumMemory(nterms, sizeof(*terms));
      if (matrix == (double **) NULL ||
          vectors == (double **) NULL ||
          terms == (double *) NULL )
        {
          matrix  = RelinquishMagickMatrix(matrix, nterms);
          vectors = (double **) RelinquishMagickMemory(vectors);
          terms   = (double *) RelinquishMagickMemory(terms);
          coeff   = (double *) RelinquishMagickMemory(coeff);
          (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed",
                  "%s", "DistortCoefficients");
          return((double *) NULL);
        }
      /* fake a number_values x3 vectors matrix from coefficients array */
      for (i=0; i < number_values; i++)
        vectors[i] = &(coeff[2+i*nterms]);
      /* Add given control point pairs for least squares solving */
      for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */
        for (j=0; j < (ssize_t) nterms; j++)
          terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]);
        LeastSquaresAddTerms(matrix,vectors,terms,
             &(arguments[i+cp_values]),nterms,number_values);
      }
      terms = (double *) RelinquishMagickMemory(terms);
      /*
         Solve for LeastSquares Coefficients */
      status=GaussJordanElimination(matrix,vectors,nterms,number_values);
      matrix = RelinquishMagickMatrix(matrix, nterms);
      vectors = (double **) RelinquishMagickMemory(vectors);
      if ( status == MagickFalse ) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument","%s : 'Unsolvable Matrix'",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      }
      return(coeff);
    }
    case ArcDistortion:
    {
      /* Arc Distortion
         Args: arc_width  rotate  top_edge_radius  bottom_edge_radius
         All but first argument are optional
            arc_width      The angle over which to arc the image side-to-side
            rotate         Angle to rotate image from vertical center
            top_radius     Set top edge of source image at this radius
            bottom_radius  Set bottom edge to this radius (radial scaling)

         By default, if the radii arguments are nor provided the image radius
         is calculated so the horizontal center-line is fits the given arc
         without scaling.

         The output image size is ALWAYS adjusted to contain the whole image,
         and an offset is given to position image relative to the 0,0 point of
         the origin, allowing users to use relative positioning onto larger
         background (via -flatten).

         The arguments are converted to these coefficients
            c0: angle for center of source image
            c1: angle scale for mapping to source image
            c2: radius for top of source image
            c3: radius scale for mapping source image
            c4: centerline of arc within source image

         Note the coefficients use a center angle, so asymptotic join is
         furthest from both sides of the source image.  This also means that
         for arc angles greater than 360 the sides of the image will be
         trimmed equally.

         Arc Distortion Notes...
           + Does not use a set of CPs
           + Will only work with Image Distortion
           + Can not be used for generating a sparse gradient (interpolation)
      */
      if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument","%s : 'Arc Angle Too Small'",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      }
      if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument","%s : 'Outer Radius Too Small'",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      }
      coeff[0] = -MagickPI2;   /* -90, place at top! */
      if ( number_arguments >= 1 )
        coeff[1] = DegreesToRadians(arguments[0]);
      else
        coeff[1] = MagickPI2;   /* zero arguments - center is at top */
      if ( number_arguments >= 2 )
        coeff[0] += DegreesToRadians(arguments[1]);
      /* fold the center angle back into a single turn */
      coeff[0] /= Magick2PI;   /* normalize radians */
      coeff[0] -= MagickRound(coeff[0]);
      coeff[0] *= Magick2PI;   /* de-normalize back to radians */
      coeff[3] = (double)image->rows-1;
      coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0;
      if ( number_arguments >= 3 ) {
        if ( number_arguments >= 4 )
          coeff[3] = arguments[2] - arguments[3];
        else
          coeff[3] *= arguments[2]/coeff[2];
        coeff[2] = arguments[2];
      }
      coeff[4] = ((double)image->columns-1.0)/2.0;
      return(coeff);
    }
    case PolarDistortion:
    case DePolarDistortion:
    {
      /* (De)Polar Distortion   (same set of arguments)
           Args:  Rmax, Rmin,  Xcenter,Ycenter,  Afrom,Ato
         DePolar can also have the extra arguments of Width, Height

         Coefficients 0 to 5 is the sanitized version first 6 input args
         Coefficient 6  is the angle to coord ratio  and visa-versa
         Coefficient 7  is the radius to coord ratio and visa-versa

         WARNING: It is possible for  Radius max<min  and/or  Angle from>to
      */
      if ( number_arguments == 3
          || ( number_arguments > 6 &&
               *method == PolarDistortion )
          || number_arguments > 8 ) {
        (void) ThrowMagickException(exception,GetMagickModule(),
              OptionError,"InvalidArgument", "%s : number of arguments",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        coeff=(double *) RelinquishMagickMemory(coeff);
        return((double *) NULL);
      }
      /* Rmax - if 0 calculate appropriate value */
      if ( number_arguments >= 1 )
        coeff[0] = arguments[0];
      else
        coeff[0] = 0.0;
      /* Rmin - usually 0 */
      coeff[1] = number_arguments >= 2 ? arguments[1] : 0.0;
      /* Center X,Y */
      if ( number_arguments >= 4 ) {
        coeff[2] = arguments[2];
        coeff[3] = arguments[3];
      }
      else { /* center of actual image */
        coeff[2] = (double)(image->columns)/2.0+image->page.x;
        coeff[3] = (double)(image->rows)/2.0+image->page.y;
      }
      /* Angle from,to - about polar center 0 is downward */
      coeff[4] = -MagickPI;
      if ( number_arguments >= 5 )
        coeff[4] = DegreesToRadians(arguments[4]);
      coeff[5] = coeff[4];
      if ( number_arguments >= 6 )
        coeff[5] = DegreesToRadians(arguments[5]);
      if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon )
        coeff[5] += Magick2PI;   /* same angle is a full circle */
      /* if radius 0 or negative, its a special value...
      */
      if ( coeff[0] < MagickEpsilon ) {
        /* Use closest edge  if radius == 0 */
        if ( fabs(coeff[0]) < MagickEpsilon ) {
          coeff[0]=MagickMin(fabs(coeff[2]-image->page.x),
                             fabs(coeff[3]-image->page.y));
          coeff[0]=MagickMin(coeff[0],
                       fabs(coeff[2]-image->page.x-image->columns));
          coeff[0]=MagickMin(coeff[0],
                       fabs(coeff[3]-image->page.y-image->rows));
        }
        /* furthest diagonal  if radius == -1 */
        if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) {
          double rx,ry;
          /* distance-squared to each of the four image corners; take max */
          rx = coeff[2]-image->page.x;
          ry = coeff[3]-image->page.y;
          coeff[0] = rx*rx+ry*ry;
          ry = coeff[3]-image->page.y-image->rows;
          coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
          rx = coeff[2]-image->page.x-image->columns;
          coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
          ry = coeff[3]-image->page.y;
          coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
          coeff[0] = sqrt(coeff[0]);
        }
      }
      /* IF  Rmax <= 0  or  Rmin < 0  OR  Rmax < Rmin, THEN error */
      if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon
           || (coeff[0]-coeff[1]) < MagickEpsilon ) {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument", "%s : Invalid Radius",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        coeff=(double *) RelinquishMagickMemory(coeff);
        return((double *) NULL);
      }
      /* conversion ratios */
      if ( *method == PolarDistortion ) {
        coeff[6]=(double) image->columns/(coeff[5]-coeff[4]);
        coeff[7]=(double) image->rows/(coeff[0]-coeff[1]);
      }
      else { /* *method == DePolarDistortion */
        coeff[6]=(coeff[5]-coeff[4])/image->columns;
        coeff[7]=(coeff[0]-coeff[1])/image->rows;
      }
      return(coeff);
    }
    case Cylinder2PlaneDistortion:
    case Plane2CylinderDistortion:
    {
      /* 3D Cylinder to/from a Tangential Plane

         Projection between a cylinder and flat plain from a point on the
         center line of the cylinder.

         The two surfaces coincide in 3D space at the given centers of
         distortion (perpendicular to projection point) on both images.
         Args:  FOV_arc_width
         Coefficients: FOV(radians), Radius, center_x,y, dest_center_x,y

         FOV (Field Of View) the angular field of view of the distortion,
         across the width of the image, in degrees.

         The centers are the points of least distortion in the input and
         resulting images.

         These centers are however determined later.

         Coeff 0 is the FOV angle of view of image width in radians
         Coeff 1 is calculated radius of cylinder.
         Coeff 2,3  center of distortion of input image
         Coefficients 4,5  Center of Distortion of dest (determined later)
      */
      if ( arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument", "%s : Invalid FOV Angle",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        coeff=(double *) RelinquishMagickMemory(coeff);
        return((double *) NULL);
      }
      coeff[0] = DegreesToRadians(arguments[0]);
      if ( *method == Cylinder2PlaneDistortion )
        /* image is curved around cylinder, so FOV angle (in radians)
         * scales directly to image X coordinate, according to its radius.
         */
        coeff[1] = (double) image->columns/coeff[0];
      else
        /* radius is distance away from an image with this angular FOV */
        coeff[1] = (double) image->columns / ( 2 * tan(coeff[0]/2) );
      coeff[2] = (double)(image->columns)/2.0+image->page.x;
      coeff[3] = (double)(image->rows)/2.0+image->page.y;
      coeff[4] = coeff[2];
      coeff[5] = coeff[3];   /* assuming image size is the same */
      return(coeff);
    }
    case BarrelDistortion:
    case BarrelInverseDistortion:
    {
      /* Barrel Distortion
            Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd
         BarrelInv Distortion
            Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D)

         Where Rd is the normalized radius from corner to middle of image

         Input Arguments are one of the following forms (number of arguments)...
3: A,B,C 4: A,B,C,D 5: A,B,C X,Y 6: A,B,C,D X,Y 8: Ax,Bx,Cx,Dx Ay,By,Cy,Dy 10: Ax,Bx,Cx,Dx Ay,By,Cy,Dy X,Y Returns 10 coefficent values, which are de-normalized (pixel scale) Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, Xc, Yc */ /* Radius de-normalization scaling factor */ double rscale = 2.0/MagickMin((double) image->columns,(double) image->rows); /* sanity check number of args must = 3,4,5,6,8,10 or error */ if ( (number_arguments < 3) || (number_arguments == 7) || (number_arguments == 9) || (number_arguments > 10) ) { coeff=(double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument", "%s : number of arguments", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* A,B,C,D coefficients */ coeff[0] = arguments[0]; coeff[1] = arguments[1]; coeff[2] = arguments[2]; if ((number_arguments == 3) || (number_arguments == 5) ) coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2]; else coeff[3] = arguments[3]; /* de-normalize the coefficients */ coeff[0] *= pow(rscale,3.0); coeff[1] *= rscale*rscale; coeff[2] *= rscale; /* Y coefficients: as given OR same as X coefficients */ if ( number_arguments >= 8 ) { coeff[4] = arguments[4] * pow(rscale,3.0); coeff[5] = arguments[5] * rscale*rscale; coeff[6] = arguments[6] * rscale; coeff[7] = arguments[7]; } else { coeff[4] = coeff[0]; coeff[5] = coeff[1]; coeff[6] = coeff[2]; coeff[7] = coeff[3]; } /* X,Y Center of Distortion (image coodinates) */ if ( number_arguments == 5 ) { coeff[8] = arguments[3]; coeff[9] = arguments[4]; } else if ( number_arguments == 6 ) { coeff[8] = arguments[4]; coeff[9] = arguments[5]; } else if ( number_arguments == 10 ) { coeff[8] = arguments[8]; coeff[9] = arguments[9]; } else { /* center of the image provided (image coodinates) */ coeff[8] = (double)image->columns/2.0 + image->page.x; coeff[9] = (double)image->rows/2.0 + image->page.y; } return(coeff); } case ShepardsDistortion: { /* Shepards Distortion input 
arguments are the coefficents! Just check the number of arguments is valid! Args: u1,v1, x1,y1, ... OR : u1,v1, r1,g1,c1, ... */ if ( number_arguments%cp_size != 0 || number_arguments < cp_size ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'requires CP's (4 numbers each)'", CommandOptionToMnemonic(MagickDistortOptions, *method)); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* User defined weighting power for Shepard's Method */ { const char *artifact=GetImageArtifact(image,"shepards:power"); if ( artifact != (const char *) NULL ) { coeff[0]=StringToDouble(artifact,(char **) NULL) / 2.0; if ( coeff[0] < MagickEpsilon ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument","%s", "-define shepards:power" ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } } else coeff[0]=1.0; /* Default power of 2 (Inverse Squared) */ } return(coeff); } default: break; } /* you should never reach this point */ perror("no method handler"); /* just fail assertion */ return((double *) NULL); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D i s t o r t R e s i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DistortResizeImage() resize image using the equivalent but slower image % distortion operator. The filter is applied using a EWA cylindrical % resampling. But like resize the final image size is limited to whole pixels % with no effects by virtual-pixels on the result. % % Note that images containing a transparency channel will be twice as slow to % resize as images one without transparency. % % The format of the DistortResizeImage method is: % % Image *AdaptiveResizeImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
%
%    o columns: the number of columns in the resized image.
%
%    o rows: the number of rows in the resized image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/* Resize by applying an affine DistortImage() (EWA cylindrical resampling),
   then crop back to exact whole-pixel dimensions so virtual pixels cannot
   affect the final result.  Returns a new image, or NULL on failure; the
   caller owns (and must destroy) the returned image. */
MagickExport Image *DistortResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
#define DistortResizeImageTag  "Distort/Image"

  Image
    *resize_image,
    *tmp_image;

  RectangleInfo
    crop_area;

  double
    distort_args[12];

  VirtualPixelMethod
    vp_save;

  /*
    Distort resize image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /* Do not short-circuit this resize if final image size is unchanged */
  /*
    Affine control-point arguments: source width/height maps to the requested
    columns/rows; all other entries stay zero, giving a pure scale with no
    translation or shear.
  */
  (void) ResetMagickMemory(distort_args,0,12*sizeof(double));
  distort_args[4]=(double) image->columns;
  distort_args[6]=(double) columns;
  distort_args[9]=(double) image->rows;
  distort_args[11]=(double) rows;
  /* remember the caller's virtual-pixel method; restored on the result */
  vp_save=GetImageVirtualPixelMethod(image);
  tmp_image=CloneImage(image,0,0,MagickTrue,exception);
  if ( tmp_image == (Image *) NULL )
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod,
    exception);
  tmp_image->image_info=image->image_info; /* preserve global options */
  if (image->alpha_trait != BlendPixelTrait)
    {
      /* Image has no transparency channel, so we are free to use it */
      (void) SetImageAlphaChannel(tmp_image,SetAlphaChannel,exception);
      /* comma operator: distort, then immediately release the clone */
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception),
        tmp_image=DestroyImage(tmp_image);
      if ( resize_image == (Image *) NULL )
        return((Image *) NULL);
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel,exception);
    }
  else
    {
      /*
        Image has transparency, so handle colors and alpha separately.
        Basically we need to separate the Virtual-Pixel alpha in the resized
        image, so only the actual original image's alpha channel is used.

        Distort the alpha channel separately first...
      */
      Image
        *resize_alpha;

      (void) SetImageAlphaChannel(tmp_image,ExtractAlphaChannel,exception);
      (void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel,exception);
      resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception),
        tmp_image=DestroyImage(tmp_image);
      if (resize_alpha == (Image *) NULL)
        return((Image *) NULL);
      /* distort the actual image containing alpha + VP alpha */
      tmp_image=CloneImage(image,0,0,MagickTrue,exception);
      /* NOTE(review): resize_alpha appears to leak if this clone fails --
         TODO confirm against upstream */
      if ( tmp_image == (Image *) NULL )
        return((Image *) NULL);
      tmp_image->image_info=image->image_info; /* preserve global options */
      (void) SetImageVirtualPixelMethod(tmp_image,
        TransparentVirtualPixelMethod,exception);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception),
        tmp_image=DestroyImage(tmp_image);
      if ( resize_image == (Image *) NULL)
        {
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      /* replace the resized image's alpha with the separately distorted alpha */
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel,
        exception);
      (void) SetImageAlphaChannel(resize_alpha,DeactivateAlphaChannel,
        exception);
      (void) CompositeImage(resize_image,resize_alpha,CopyAlphaCompositeOp,
        MagickTrue,0,0,exception);
      resize_alpha=DestroyImage(resize_alpha);
    }
  (void) SetImageVirtualPixelMethod(resize_image,vp_save,exception);
  /*
    Clean up the results of the Distortion: crop away any virtual-pixel
    overrun so the output is exactly columns x rows at offset 0,0.
  */
  crop_area.width=columns;
  crop_area.height=rows;
  crop_area.x=0;
  crop_area.y=0;
  tmp_image=resize_image;
  resize_image=CropImage(tmp_image,&crop_area,exception);
  tmp_image=DestroyImage(tmp_image);
  return(resize_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D i s t o r t   I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
DistortImage() distorts an image using various distortion methods, by
%  mapping color lookups of the source image to a new destination image
%  usually of the same size as the source image, unless 'bestfit' is set to
%  true.
%
%  If 'bestfit' is enabled, and distortion allows it, the destination image is
%  adjusted to ensure the whole source 'image' will just fit within the final
%  destination image, which will be sized and offset accordingly.  Also in
%  many cases the virtual offset of the source image will be taken into
%  account in the mapping.
%
%  If the '-verbose' control option has been set print to standard error the
%  equivalent '-fx' formula with coefficients for the function, if practical.
%
%  The format of the DistortImage() method is:
%
%      Image *DistortImage(const Image *image,const DistortImageMethod method,
%        const size_t number_arguments,const double *arguments,
%        MagickBooleanType bestfit, ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to be distorted.
%
%    o method: the method of image distortion.
%
%        ArcDistortion always ignores source image offset, and always
%        'bestfit' the destination image with the top left corner offset
%        relative to the polar mapping center.
%
%        Affine, Perspective, and Bilinear, do least squares fitting of the
%        distortion when more than the minimum number of control point pairs
%        are provided.
%
%        Perspective, and Bilinear, fall back to an Affine distortion when less
%        than 4 control point pairs are provided.  While Affine distortions
%        let you use any number of control point pairs, that is Zero pairs is
%        a No-Op (viewport only) distortion, one pair is a translation and
%        two pairs of control points do a scale-rotate-translate, without any
%        shearing.
%
%    o number_arguments: the number of arguments given.
%
%    o arguments: an array of floating point arguments for this method.
%
%    o bestfit: Attempt to 'bestfit' the size of the resulting image.
%      This also forces the resulting image to be a 'layered' virtual
%      canvas image.  Can be overridden using 'distort:viewport' setting.
%
%    o exception: return any errors or warnings in this structure
%
%  Extra Controls from Image meta-data (artifacts)...
%
%    o "verbose"
%        Output to stderr alternatives, internal coefficients, and FX
%        equivalents for the distortion operation (if feasible).
%        This forms an extra check of the distortion method, and allows users
%        access to the internal constants IM calculates for the distortion.
%
%    o "distort:viewport"
%        Directly set the output image canvas area and offset to use for the
%        resulting image, rather than use the original image's canvas, or a
%        calculated 'bestfit' canvas.
%
%    o "distort:scale"
%        Scale the size of the output canvas by this amount to provide a
%        method of Zooming, and for super-sampling the results.
%
%  Other settings that can affect results include
%
%    o 'interpolate' For source image lookups (scale enlargements)
%
%    o 'filter'      Set filter to use for area-resampling (scale shrinking).
% Set to 'point' to turn off and use 'interpolate' lookup % instead % */ MagickExport Image *DistortImage(const Image *image,DistortImageMethod method, const size_t number_arguments,const double *arguments, MagickBooleanType bestfit,ExceptionInfo *exception) { #define DistortImageTag "Distort/Image" double *coeff, output_scaling; Image *distort_image; RectangleInfo geometry; /* geometry of the distorted space viewport */ MagickBooleanType viewport_given; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); /* Handle Special Compound Distortions */ if ( method == ResizeDistortion ) { if ( number_arguments != 2 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","Resize", "Invalid number of args: 2 only"); return((Image *) NULL); } distort_image=DistortResizeImage(image,(size_t)arguments[0], (size_t)arguments[1], exception); return(distort_image); } /* Convert input arguments (usually as control points for reverse mapping) into mapping coefficients to apply the distortion. Note that some distortions are mapped to other distortions, and as such do not require specific code after this point. */ coeff = GenerateCoefficients(image, &method, number_arguments, arguments, 0, exception); if ( coeff == (double *) NULL ) return((Image *) NULL); /* Determine the size and offset for a 'bestfit' destination. Usally the four corners of the source image is enough. 
*/ /* default output image bounds, when no 'bestfit' is requested */ geometry.width=image->columns; geometry.height=image->rows; geometry.x=0; geometry.y=0; if ( method == ArcDistortion ) { bestfit = MagickTrue; /* always calculate a 'best fit' viewport */ } /* Work out the 'best fit', (required for ArcDistortion) */ if ( bestfit ) { PointInfo s,d,min,max; /* source, dest coords --mapping--> min, max coords */ MagickBooleanType fix_bounds = MagickTrue; /* enlarge bounds for VP handling */ s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */ /* defines to figure out the bounds of the distorted image */ #define InitalBounds(p) \ { \ /* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \ min.x = max.x = p.x; \ min.y = max.y = p.y; \ } #define ExpandBounds(p) \ { \ /* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \ min.x = MagickMin(min.x,p.x); \ max.x = MagickMax(max.x,p.x); \ min.y = MagickMin(min.y,p.y); \ max.y = MagickMax(max.y,p.y); \ } switch (method) { case AffineDistortion: { double inverse[6]; InvertAffineCoefficients(coeff, inverse); s.x = (double) image->page.x; s.y = (double) image->page.y; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; InitalBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); s.x = (double) image->page.x; s.y = (double) image->page.y+image->rows; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y+image->rows; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); break; } case PerspectiveDistortion: { double inverse[8], scale; InvertPerspectiveCoefficients(coeff, inverse); s.x = (double) image->page.x; s.y = (double) image->page.y; 
scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); InitalBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); s.x = (double) image->page.x; s.y = (double) image->page.y+image->rows; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y+image->rows; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); break; } case ArcDistortion: { double a, ca, sa; /* Forward Map Corners */ a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; InitalBounds(d); d.x = (coeff[2]-coeff[3])*ca; d.y = (coeff[2]-coeff[3])*sa; ExpandBounds(d); a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; ExpandBounds(d); d.x = (coeff[2]-coeff[3])*ca; d.y = (coeff[2]-coeff[3])*sa; ExpandBounds(d); /* Orthogonal points along top of arc */ for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2); a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) { ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; ExpandBounds(d); } /* Convert the angle_to_width and radius_to_height to appropriate scaling factors, to allow faster processing in the mapping function. 
*/ coeff[1] = (double) (Magick2PI*image->columns/coeff[1]); coeff[3] = (double)image->rows/coeff[3]; break; } case PolarDistortion: { if (number_arguments < 2) coeff[2] = coeff[3] = 0.0; min.x = coeff[2]-coeff[0]; max.x = coeff[2]+coeff[0]; min.y = coeff[3]-coeff[0]; max.y = coeff[3]+coeff[0]; /* should be about 1.0 if Rmin = 0 */ coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]); break; } case DePolarDistortion: { /* direct calculation as it needs to tile correctly * for reversibility in a DePolar-Polar cycle */ fix_bounds = MagickFalse; geometry.x = geometry.y = 0; geometry.height = (size_t) ceil(coeff[0]-coeff[1]); geometry.width = (size_t) ceil((coeff[0]-coeff[1])*(coeff[5]-coeff[4])*0.5); /* correct scaling factors relative to new size */ coeff[6]=(coeff[5]-coeff[4])/geometry.width; /* changed width */ coeff[7]=(coeff[0]-coeff[1])/geometry.height; /* should be about 1.0 */ break; } case Cylinder2PlaneDistortion: { /* direct calculation so center of distortion is either a pixel * center, or pixel edge. 
This allows for reversibility of the * distortion */ geometry.x = geometry.y = 0; geometry.width = (size_t) ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) ); geometry.height = (size_t) ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) ); /* correct center of distortion relative to new size */ coeff[4] = (double) geometry.width/2.0; coeff[5] = (double) geometry.height/2.0; fix_bounds = MagickFalse; break; } case Plane2CylinderDistortion: { /* direct calculation center is either pixel center, or pixel edge * so as to allow reversibility of the image distortion */ geometry.x = geometry.y = 0; geometry.width = (size_t) ceil(coeff[0]*coeff[1]); /* FOV * radius */ geometry.height = (size_t) (2*coeff[3]); /* input image height */ /* correct center of distortion relative to new size */ coeff[4] = (double) geometry.width/2.0; coeff[5] = (double) geometry.height/2.0; fix_bounds = MagickFalse; break; } case ShepardsDistortion: case BilinearForwardDistortion: case BilinearReverseDistortion: #if 0 case QuadrilateralDistortion: #endif case PolynomialDistortion: case BarrelDistortion: case BarrelInverseDistortion: default: /* no calculated bestfit available for these distortions */ bestfit = MagickFalse; fix_bounds = MagickFalse; break; } /* Set the output image geometry to calculated 'bestfit'. Yes this tends to 'over do' the file image size, ON PURPOSE! Do not do this for DePolar which needs to be exact for virtual tiling. */ if ( fix_bounds ) { geometry.x = (ssize_t) floor(min.x-0.5); geometry.y = (ssize_t) floor(min.y-0.5); geometry.width=(size_t) ceil(max.x-geometry.x+0.5); geometry.height=(size_t) ceil(max.y-geometry.y+0.5); } } /* end bestfit destination image calculations */ /* The user provided a 'viewport' expert option which may overrides some parts of the current output image geometry. This also overrides its default 'bestfit' setting. 
*/ { const char *artifact=GetImageArtifact(image,"distort:viewport"); viewport_given = MagickFalse; if ( artifact != (const char *) NULL ) { MagickStatusType flags=ParseAbsoluteGeometry(artifact,&geometry); if (flags==NoValue) (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"InvalidSetting","'%s' '%s'", "distort:viewport",artifact); else viewport_given = MagickTrue; } } /* Verbose output */ if ( IfStringTrue(GetImageArtifact(image,"verbose")) ) { register ssize_t i; char image_gen[MaxTextExtent]; const char *lookup; /* Set destination image size and virtual offset */ if ( bestfit || viewport_given ) { (void) FormatLocaleString(image_gen, MaxTextExtent," -size %.20gx%.20g " "-page %+.20g%+.20g xc: +insert \\\n",(double) geometry.width, (double) geometry.height,(double) geometry.x,(double) geometry.y); lookup="v.p{ xx-v.page.x-.5, yy-v.page.y-.5 }"; } else { image_gen[0] = '\0'; /* no destination to generate */ lookup = "p{ xx-page.x-.5, yy-page.y-.5 }"; /* simplify lookup */ } switch (method) { case AffineDistortion: { double *inverse; inverse = (double *) AcquireQuantumMemory(6,sizeof(*inverse)); if (inverse == (double *) NULL) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortImages"); return((Image *) NULL); } InvertAffineCoefficients(coeff, inverse); CoefficientsToAffineArgs(inverse); (void) FormatLocaleFile(stderr, "Affine Projection:\n"); (void) FormatLocaleFile(stderr, " -distort AffineProjection \\\n '"); for (i=0; i < 5; i++) (void) FormatLocaleFile(stderr, "%lf,", inverse[i]); (void) FormatLocaleFile(stderr, "%lf'\n", inverse[5]); inverse = (double *) RelinquishMagickMemory(inverse); (void) FormatLocaleFile(stderr, "Affine Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " 
xx=%+lf*ii %+lf*jj %+lf;\n", coeff[0], coeff[1], coeff[2]); (void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf;\n", coeff[3], coeff[4], coeff[5]); (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; } case PerspectiveDistortion: { double *inverse; inverse = (double *) AcquireQuantumMemory(8,sizeof(*inverse)); if (inverse == (double *) NULL) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((Image *) NULL); } InvertPerspectiveCoefficients(coeff, inverse); (void) FormatLocaleFile(stderr, "Perspective Projection:\n"); (void) FormatLocaleFile(stderr, " -distort PerspectiveProjection \\\n '"); for (i=0; i<4; i++) (void) FormatLocaleFile(stderr, "%lf, ", inverse[i]); (void) FormatLocaleFile(stderr, "\n "); for (; i<7; i++) (void) FormatLocaleFile(stderr, "%lf, ", inverse[i]); (void) FormatLocaleFile(stderr, "%lf'\n", inverse[7]); inverse = (double *) RelinquishMagickMemory(inverse); (void) FormatLocaleFile(stderr, "Perspective Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " rr=%+lf*ii %+lf*jj + 1;\n", coeff[6], coeff[7]); (void) FormatLocaleFile(stderr, " xx=(%+lf*ii %+lf*jj %+lf)/rr;\n", coeff[0], coeff[1], coeff[2]); (void) FormatLocaleFile(stderr, " yy=(%+lf*ii %+lf*jj %+lf)/rr;\n", coeff[3], coeff[4], coeff[5]); (void) FormatLocaleFile(stderr, " rr%s0 ? %s : blue' \\\n", coeff[8] < 0 ? 
"<" : ">", lookup); break; } case BilinearForwardDistortion: (void) FormatLocaleFile(stderr, "BilinearForward Mapping Equations:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " i = %+lf*x %+lf*y %+lf*x*y %+lf;\n", coeff[0], coeff[1], coeff[2], coeff[3]); (void) FormatLocaleFile(stderr, " j = %+lf*x %+lf*y %+lf*x*y %+lf;\n", coeff[4], coeff[5], coeff[6], coeff[7]); #if 0 /* for debugging */ (void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n", coeff[8], coeff[9]); #endif (void) FormatLocaleFile(stderr, "BilinearForward Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n", 0.5-coeff[3], 0.5-coeff[7]); (void) FormatLocaleFile(stderr, " bb=%lf*ii %+lf*jj %+lf;\n", coeff[6], -coeff[2], coeff[8]); /* Handle Special degenerate (non-quadratic) or trapezoidal case */ if ( coeff[9] != 0 ) { (void) FormatLocaleFile(stderr, " rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n", -2*coeff[9], coeff[4], -coeff[0]); (void) FormatLocaleFile(stderr, " yy=( -bb + sqrt(rt) ) / %lf;\n", coeff[9]); } else (void) FormatLocaleFile(stderr, " yy=(%lf*ii%+lf*jj)/bb;\n", -coeff[4], coeff[0]); (void) FormatLocaleFile(stderr, " xx=(ii %+lf*yy)/(%lf %+lf*yy);\n", -coeff[1], coeff[0], coeff[2]); if ( coeff[9] != 0 ) (void) FormatLocaleFile(stderr, " (rt < 0 ) ? 
red : %s'\n", lookup); else (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; case BilinearReverseDistortion: #if 0 (void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n"); (void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n"); (void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n", coeff[3], coeff[0], coeff[1], coeff[2]); (void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n", coeff[7], coeff[4], coeff[5], coeff[6]); #endif (void) FormatLocaleFile(stderr, "BilinearReverse Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n", coeff[0], coeff[1], coeff[2], coeff[3]); (void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n", coeff[4], coeff[5], coeff[6], coeff[7]); (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; case PolynomialDistortion: { size_t nterms = (size_t) coeff[1]; (void) FormatLocaleFile(stderr, "Polynomial (order %lg, terms %lu), FX Equivelent\n", coeff[0],(unsigned long) nterms); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " xx ="); for (i=0; i<(ssize_t) nterms; i++) { if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n "); (void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i], poly_basis_str(i)); } (void) FormatLocaleFile(stderr, ";\n yy ="); for (i=0; i<(ssize_t) nterms; i++) { if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n "); (void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i+nterms], poly_basis_str(i)); } (void) FormatLocaleFile(stderr, ";\n %s' \\\n", lookup); break; } case ArcDistortion: { (void) FormatLocaleFile(stderr, "Arc Distort, Internal Coefficients:\n"); for ( i=0; i<5; i++ ) (void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, 
coeff[i]); (void) FormatLocaleFile(stderr, "Arc Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x; jj=j+page.y;\n"); (void) FormatLocaleFile(stderr, " xx=(atan2(jj,ii)%+lf)/(2*pi);\n", -coeff[0]); (void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n"); (void) FormatLocaleFile(stderr, " xx=xx*%lf %+lf;\n", coeff[1], coeff[4]); (void) FormatLocaleFile(stderr, " yy=(%lf - hypot(ii,jj)) * %lf;\n", coeff[2], coeff[3]); (void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n"); break; } case PolarDistortion: { (void) FormatLocaleFile(stderr, "Polar Distort, Internal Coefficents\n"); for ( i=0; i<8; i++ ) (void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]); (void) FormatLocaleFile(stderr, "Polar Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n", -coeff[2], -coeff[3]); (void) FormatLocaleFile(stderr, " xx=(atan2(ii,jj)%+lf)/(2*pi);\n", -(coeff[4]+coeff[5])/2 ); (void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n"); (void) FormatLocaleFile(stderr, " xx=xx*2*pi*%lf + v.w/2;\n", coeff[6] ); (void) FormatLocaleFile(stderr, " yy=(hypot(ii,jj)%+lf)*%lf;\n", -coeff[1], coeff[7] ); (void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n"); break; } case DePolarDistortion: { (void) FormatLocaleFile(stderr, "DePolar Distort, Internal Coefficents\n"); for ( i=0; i<8; i++ ) (void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]); (void) FormatLocaleFile(stderr, "DePolar Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'aa=(i+.5)*%lf %+lf;\n", coeff[6], +coeff[4] ); (void) FormatLocaleFile(stderr, " rr=(j+.5)*%lf %+lf;\n", coeff[7], +coeff[1] ); (void) FormatLocaleFile(stderr, " xx=rr*sin(aa) %+lf;\n", coeff[2] ); (void) FormatLocaleFile(stderr, " yy=rr*cos(aa) %+lf;\n", coeff[3] ); (void) 
FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n"); break; } case Cylinder2PlaneDistortion: { (void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, Internal Coefficents\n"); (void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]); (void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n", -coeff[4], -coeff[5]); (void) FormatLocaleFile(stderr, " aa=atan(ii/%+lf);\n", coeff[1] ); (void) FormatLocaleFile(stderr, " xx=%lf*aa%+lf;\n", coeff[1], coeff[2] ); (void) FormatLocaleFile(stderr, " yy=jj*cos(aa)%+lf;\n", coeff[3] ); (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; } case Plane2CylinderDistortion: { (void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, Internal Coefficents\n"); (void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]); (void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n", -coeff[4], -coeff[5]); (void) FormatLocaleFile(stderr, " ii=ii/%+lf;\n", coeff[1] ); (void) FormatLocaleFile(stderr, " xx=%lf*tan(ii)%+lf;\n", coeff[1], coeff[2] ); (void) FormatLocaleFile(stderr, " yy=jj/cos(ii)%+lf;\n", coeff[3] ); (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; } case BarrelDistortion: case BarrelInverseDistortion: { double xc,yc; /* NOTE: This does the barrel roll in pixel coords not image coords ** The internal distortion must do it in image coordinates, ** so that is what the center coeff (8,9) is given in. */ xc = ((double)image->columns-1.0)/2.0 + image->page.x; yc = ((double)image->rows-1.0)/2.0 + image->page.y; (void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n", method == BarrelDistortion ? 
"" : "Inv"); (void) FormatLocaleFile(stderr, "%s", image_gen); if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 ) (void) FormatLocaleFile(stderr, " -fx 'xc=(w-1)/2; yc=(h-1)/2;\n"); else (void) FormatLocaleFile(stderr, " -fx 'xc=%lf; yc=%lf;\n", coeff[8]-0.5, coeff[9]-0.5); (void) FormatLocaleFile(stderr, " ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n"); (void) FormatLocaleFile(stderr, " ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n", method == BarrelDistortion ? "*" : "/", coeff[0],coeff[1],coeff[2],coeff[3]); (void) FormatLocaleFile(stderr, " jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n", method == BarrelDistortion ? "*" : "/", coeff[4],coeff[5],coeff[6],coeff[7]); (void) FormatLocaleFile(stderr, " v.p{fx*ii+xc,fy*jj+yc}' \\\n"); } default: break; } } /* The user provided a 'scale' expert option will scale the output image size, by the factor given allowing for super-sampling of the distorted image space. Any scaling factors must naturally be halved as a result. */ { const char *artifact; artifact=GetImageArtifact(image,"distort:scale"); output_scaling = 1.0; if (artifact != (const char *) NULL) { output_scaling = fabs(StringToDouble(artifact,(char **) NULL)); geometry.width=(size_t) (output_scaling*geometry.width+0.5); geometry.height=(size_t) (output_scaling*geometry.height+0.5); geometry.x=(ssize_t) (output_scaling*geometry.x+0.5); geometry.y=(ssize_t) (output_scaling*geometry.y+0.5); if ( output_scaling < 0.1 ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s", "-set option:distort:scale" ); return((Image *) NULL); } output_scaling = 1/output_scaling; } } #define ScaleFilter(F,A,B,C,D) \ ScaleResampleFilter( (F), \ output_scaling*(A), output_scaling*(B), \ output_scaling*(C), output_scaling*(D) ) /* Initialize the distort image attributes. 
*/ distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue, exception); if (distort_image == (Image *) NULL) return((Image *) NULL); /* if image is ColorMapped - change it to DirectClass */ if (SetImageStorageClass(distort_image,DirectClass,exception) == MagickFalse) { distort_image=DestroyImage(distort_image); return((Image *) NULL); } if ((IsPixelInfoGray(&distort_image->background_color) == MagickFalse) && (IsGrayColorspace(distort_image->colorspace) != MagickFalse)) (void) SetImageColorspace(distort_image,sRGBColorspace,exception); if (distort_image->background_color.alpha_trait == BlendPixelTrait) distort_image->alpha_trait=BlendPixelTrait; distort_image->page.x=geometry.x; distort_image->page.y=geometry.y; { /* ----- MAIN CODE ----- Sample the source image to each pixel in the distort image. */ CacheView *distort_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo zero; ResampleFilter **restrict resample_filter; ssize_t j; status=MagickTrue; progress=0; GetPixelInfo(distort_image,&zero); resample_filter=AcquireResampleFilterThreadSet(image, UndefinedVirtualPixelMethod,MagickFalse,exception); distort_view=AcquireAuthenticCacheView(distort_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,distort_image,distort_image->rows,1) #endif for (j=0; j < (ssize_t) distort_image->rows; j++) { const int id = GetOpenMPThreadId(); double validity; /* how mathematically valid is this the mapping */ MagickBooleanType sync; PixelInfo pixel, /* pixel color to assign to distorted image */ invalid; /* the color to assign when distort result is invalid */ PointInfo d, s; /* transform destination image x,y to source image x,y */ register ssize_t i; register Quantum *restrict q; q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; /* Define 
constant scaling vectors for Affine Distortions Other methods are either variable, or use interpolated lookup */ switch (method) { case AffineDistortion: ScaleFilter( resample_filter[id], coeff[0], coeff[1], coeff[3], coeff[4] ); break; default: break; } /* Initialize default pixel validity * negative: pixel is invalid output 'matte_color' * 0.0 to 1.0: antialiased, mix with resample output * 1.0 or greater: use resampled output. */ validity = 1.0; invalid=distort_image->matte_color; if (distort_image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&invalid); /* what about other color spaces? */ for (i=0; i < (ssize_t) distort_image->columns; i++) { /* map pixel coordinate to distortion space coordinate */ d.x = (double) (geometry.x+i+0.5)*output_scaling; d.y = (double) (geometry.y+j+0.5)*output_scaling; s = d; /* default is a no-op mapping */ switch (method) { case AffineDistortion: { s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]; s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5]; /* Affine partial derivitives are constant -- set above */ break; } case PerspectiveDistortion: { double p,q,r,abs_r,abs_c6,abs_c7,scale; /* perspective is a ratio of affines */ p=coeff[0]*d.x+coeff[1]*d.y+coeff[2]; q=coeff[3]*d.x+coeff[4]*d.y+coeff[5]; r=coeff[6]*d.x+coeff[7]*d.y+1.0; /* Pixel Validity -- is it a 'sky' or 'ground' pixel */ validity = (r*coeff[8] < 0.0) ? 
0.0 : 1.0; /* Determine horizon anti-alias blending */ abs_r = fabs(r)*2; abs_c6 = fabs(coeff[6]); abs_c7 = fabs(coeff[7]); if ( abs_c6 > abs_c7 ) { if ( abs_r < abs_c6*output_scaling ) validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling); } else if ( abs_r < abs_c7*output_scaling ) validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling); /* Perspective Sampling Point (if valid) */ if ( validity > 0.0 ) { /* divide by r affine, for perspective scaling */ scale = 1.0/r; s.x = p*scale; s.y = q*scale; /* Perspective Partial Derivatives or Scaling Vectors */ scale *= scale; ScaleFilter( resample_filter[id], (r*coeff[0] - p*coeff[6])*scale, (r*coeff[1] - p*coeff[7])*scale, (r*coeff[3] - q*coeff[6])*scale, (r*coeff[4] - q*coeff[7])*scale ); } break; } case BilinearReverseDistortion: { /* Reversed Mapped is just a simple polynomial */ s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3]; s.y=coeff[4]*d.x+coeff[5]*d.y +coeff[6]*d.x*d.y+coeff[7]; /* Bilinear partial derivitives of scaling vectors */ ScaleFilter( resample_filter[id], coeff[0] + coeff[2]*d.y, coeff[1] + coeff[2]*d.x, coeff[4] + coeff[6]*d.y, coeff[5] + coeff[6]*d.x ); break; } case BilinearForwardDistortion: { /* Forward mapped needs reversed polynomial equations * which unfortunatally requires a square root! */ double b,c; d.x -= coeff[3]; d.y -= coeff[7]; b = coeff[6]*d.x - coeff[2]*d.y + coeff[8]; c = coeff[4]*d.x - coeff[0]*d.y; validity = 1.0; /* Handle Special degenerate (non-quadratic) case * Currently without horizon anti-alising */ if ( fabs(coeff[9]) < MagickEpsilon ) s.y = -c/b; else { c = b*b - 2*coeff[9]*c; if ( c < 0.0 ) validity = 0.0; else s.y = ( -b + sqrt(c) )/coeff[9]; } if ( validity > 0.0 ) s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y ); /* NOTE: the sign of the square root should be -ve for parts where the source image becomes 'flipped' or 'mirrored'. FUTURE: Horizon handling FUTURE: Scaling factors or Deritives (how?) 
*/ break; } #if 0 case BilinearDistortion: /* Bilinear mapping of any Quadrilateral to any Quadrilateral */ /* UNDER DEVELOPMENT */ break; #endif case PolynomialDistortion: { /* multi-ordered polynomial */ register ssize_t k; ssize_t nterms=(ssize_t)coeff[1]; PointInfo du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */ s.x=s.y=du.x=du.y=dv.x=dv.y=0.0; for(k=0; k < nterms; k++) { s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k]; du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k]; du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k]; s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms]; dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms]; dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms]; } ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y ); break; } case ArcDistortion: { /* what is the angle and radius in the destination image */ s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI); s.x -= MagickRound(s.x); /* angle */ s.y = hypot(d.x,d.y); /* radius */ /* Arc Distortion Partial Scaling Vectors Are derived by mapping the perpendicular unit vectors dR and dA*R*2PI rather than trying to map dx and dy The results is a very simple orthogonal aligned ellipse. 
*/ if ( s.y > MagickEpsilon ) ScaleFilter( resample_filter[id], (double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] ); else ScaleFilter( resample_filter[id], distort_image->columns*2, 0, 0, coeff[3] ); /* now scale the angle and radius for source image lookup point */ s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5; s.y = (coeff[2] - s.y) * coeff[3] + image->page.y; break; } case PolarDistortion: { /* 2D Cartesain to Polar View */ d.x -= coeff[2]; d.y -= coeff[3]; s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2; s.x /= Magick2PI; s.x -= MagickRound(s.x); s.x *= Magick2PI; /* angle - relative to centerline */ s.y = hypot(d.x,d.y); /* radius */ /* Polar Scaling vectors are based on mapping dR and dA vectors This results in very simple orthogonal scaling vectors */ if ( s.y > MagickEpsilon ) ScaleFilter( resample_filter[id], (double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] ); else ScaleFilter( resample_filter[id], distort_image->columns*2, 0, 0, coeff[7] ); /* now finish mapping radius/angle to source x,y coords */ s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x; s.y = (s.y-coeff[1])*coeff[7] + image->page.y; break; } case DePolarDistortion: { /* @D Polar to Carteasain */ /* ignore all destination virtual offsets */ d.x = ((double)i+0.5)*output_scaling*coeff[6]+coeff[4]; d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1]; s.x = d.y*sin(d.x) + coeff[2]; s.y = d.y*cos(d.x) + coeff[3]; /* derivatives are usless - better to use SuperSampling */ break; } case Cylinder2PlaneDistortion: { /* 3D Cylinder to Tangential Plane */ double ax, cx; /* relative to center of distortion */ d.x -= coeff[4]; d.y -= coeff[5]; d.x /= coeff[1]; /* x' = x/r */ ax=atan(d.x); /* aa = atan(x/r) = u/r */ cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */ s.x = coeff[1]*ax; /* u = r*atan(x/r) */ s.y = d.y*cx; /* v = y*cos(u/r) */ /* derivatives... 
(see personnal notes) */ ScaleFilter( resample_filter[id], 1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y ); #if 0 if ( i == 0 && j == 0 ) { fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y); fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) ); fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n", 1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y ); fflush(stderr); } #endif /* add center of distortion in source */ s.x += coeff[2]; s.y += coeff[3]; break; } case Plane2CylinderDistortion: { /* 3D Cylinder to Tangential Plane */ /* relative to center of distortion */ d.x -= coeff[4]; d.y -= coeff[5]; /* is pixel valid - horizon of a infinite Virtual-Pixel Plane * (see Anthony Thyssen's personal note) */ validity = (double) (coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5; if ( validity > 0.0 ) { double cx,tx; d.x /= coeff[1]; /* x'= x/r */ cx = 1/cos(d.x); /* cx = 1/cos(x/r) */ tx = tan(d.x); /* tx = tan(x/r) */ s.x = coeff[1]*tx; /* u = r * tan(x/r) */ s.y = d.y*cx; /* v = y / cos(x/r) */ /* derivatives... 
(see Anthony Thyssen's personal notes) */ ScaleFilter( resample_filter[id], cx*cx, 0.0, s.y*cx/coeff[1], cx ); #if 1 /*if ( i == 0 && j == 0 )*/ if ( d.x == 0.5 && d.y == 0.5 ) { fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y); fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n", coeff[1], (double)(d.x * 180.0/MagickPI), validity ); fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n", cx*cx, 0.0, s.y*cx/coeff[1], cx); fflush(stderr); } #endif } /* add center of distortion in source */ s.x += coeff[2]; s.y += coeff[3]; break; } case BarrelDistortion: case BarrelInverseDistortion: { /* Lens Barrel Distionion Correction */ double r,fx,fy,gx,gy; /* Radial Polynomial Distortion (de-normalized) */ d.x -= coeff[8]; d.y -= coeff[9]; r = sqrt(d.x*d.x+d.y*d.y); if ( r > MagickEpsilon ) { fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3]; fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7]; gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r; gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r; /* adjust functions and scaling for 'inverse' form */ if ( method == BarrelInverseDistortion ) { fx = 1/fx; fy = 1/fy; gx *= -fx*fx; gy *= -fy*fy; } /* Set the source pixel to lookup and EWA derivative vectors */ s.x = d.x*fx + coeff[8]; s.y = d.y*fy + coeff[9]; ScaleFilter( resample_filter[id], gx*d.x*d.x + fx, gx*d.x*d.y, gy*d.x*d.y, gy*d.y*d.y + fy ); } else { /* Special handling to avoid divide by zero when r==0 ** ** The source and destination pixels match in this case ** which was set at the top of the loop using s = d; ** otherwise... 
s.x=coeff[8]; s.y=coeff[9]; */ if ( method == BarrelDistortion ) ScaleFilter( resample_filter[id], coeff[3], 0, 0, coeff[7] ); else /* method == BarrelInverseDistortion */ /* FUTURE, trap for D==0 causing division by zero */ ScaleFilter( resample_filter[id], 1.0/coeff[3], 0, 0, 1.0/coeff[7] ); } break; } case ShepardsDistortion: { /* Shepards Method, or Inverse Weighted Distance for displacement around the destination image control points The input arguments are the coefficents to the function. This is more of a 'displacement' function rather than an absolute distortion function. Note: We can not determine derivatives using shepards method so only a point sample interpolatation can be used. */ size_t i; double denominator; denominator = s.x = s.y = 0; for(i=0; i<number_arguments; i+=4) { double weight = ((double)d.x-arguments[i+2])*((double)d.x-arguments[i+2]) + ((double)d.y-arguments[i+3])*((double)d.y-arguments[i+3]); weight = pow(weight,coeff[0]); /* shepards power factor */ weight = ( weight < 1.0 ) ? 
1.0 : 1.0/weight; s.x += (arguments[ i ]-arguments[i+2])*weight; s.y += (arguments[i+1]-arguments[i+3])*weight; denominator += weight; } s.x /= denominator; s.y /= denominator; s.x += d.x; /* make it as relative displacement */ s.y += d.y; break; } default: break; /* use the default no-op given above */ } /* map virtual canvas location back to real image coordinate */ if ( bestfit && method != ArcDistortion ) { s.x -= image->page.x; s.y -= image->page.y; } s.x -= 0.5; s.y -= 0.5; if ( validity <= 0.0 ) { /* result of distortion is an invalid pixel - don't resample */ SetPixelInfoPixel(distort_image,&invalid,q); } else { /* resample the source image to find its correct color */ (void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel, exception); /* if validity between 0.0 and 1.0 mix result with invalid pixel */ if ( validity < 1.0 ) { /* Do a blend of sample color and invalid pixel */ /* should this be a 'Blend', or an 'Over' compose */ CompositePixelInfoBlend(&pixel,validity,&invalid,(1.0-validity), &pixel); } SetPixelInfoPixel(distort_image,&pixel,q); } q+=GetPixelChannels(distort_image); } sync=SyncCacheViewAuthenticPixels(distort_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_DistortImage) #endif proceed=SetImageProgress(image,DistortImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } distort_view=DestroyCacheView(distort_view); resample_filter=DestroyResampleFilterThreadSet(resample_filter); if (status == MagickFalse) distort_image=DestroyImage(distort_image); } /* Arc does not return an offset unless 'bestfit' is in effect And the user has not provided an overriding 'viewport'. 
*/
  /* Arc distortion keeps no page offset unless 'bestfit' was requested and
     no explicit viewport overrides it. */
  if ( method == ArcDistortion && !bestfit && !viewport_given ) {
    distort_image->page.x = 0;
    distort_image->page.y = 0;
  }
  coeff = (double *) RelinquishMagickMemory(coeff);
  return(distort_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R o t a t e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RotateImage() creates a new image that is a rotated copy of an existing
%  one.  Positive angles rotate counter-clockwise (right-hand rule), while
%  negative angles rotate clockwise.  Rotated images are usually larger than
%  the originals and have 'empty' triangular corners.  X axis.  Empty
%  triangles left over from shearing the image are filled with the background
%  color defined by member 'background_color' of the image.  RotateImage
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  The format of the RotateImage method is:
%
%      Image *RotateImage(const Image *image,const double degrees,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o degrees: Specifies the number of degrees to rotate the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  Image
    *distort_image,
    *rotate_image;

  double
    angle;

  PointInfo
    shear;

  size_t
    rotations;

  /*
    Adjust rotation angle: fold 'degrees' into the range (-45,45] while
    counting the number of 90-degree quarter turns in 'rotations'.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  angle=degrees;
  while (angle < -45.0)
    angle+=360.0;
  for (rotations=0; angle > 45.0; rotations++)
    angle-=90.0;
  rotations%=4;
  /*
    Shear decomposition of the residual angle; used here only to detect a
    rotation that is (nearly) an exact multiple of 90 degrees, which can be
    done losslessly by IntegralRotateImage().
  */
  shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
  shear.y=sin((double) DegreesToRadians(angle));
  if ((fabs(shear.x) < MagickEpsilon) && (fabs(shear.y) < MagickEpsilon))
    return(IntegralRotateImage(image,rotations,exception));
  /*
    General case: rotate via a single-argument (angle-only) SRT distortion
    with 'bestfit' enabled, on a clone that resolves out-of-bounds samples
    to the background color.
  */
  distort_image=CloneImage(image,0,0,MagickTrue,exception);
  if (distort_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(distort_image,BackgroundVirtualPixelMethod,
    exception);
  rotate_image=DistortImage(distort_image,ScaleRotateTranslateDistortion,1,
    &degrees,MagickTrue,exception);
  distort_image=DestroyImage(distort_image);
  return(rotate_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S p a r s e C o l o r I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SparseColorImage(), given a set of coordinates, interpolates the colors
%  found at those coordinates, across the whole image, using various methods.
%
%  The format of the SparseColorImage() method is:
%
%      Image *SparseColorImage(const Image *image,
%        const SparseColorMethod method,const size_t number_arguments,
%        const double *arguments,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to be filled in.
%
%    o method: the method to fill in the gradient between the control points.
%
%      The methods used for SparseColor() are often similar to methods
%      used for DistortImage(), and even share the same code for determination
%      of the function coefficients, though with more dimensions (or resulting
%      values).
%
%    o number_arguments: the number of arguments given.
%
%    o arguments: array of floating point arguments for this method--
%        x,y,color_values-- with color_values given as normalized values.
%
%    o exception: return any errors or warnings in this structure
%
*/
MagickExport Image *SparseColorImage(const Image *image,
  const SparseColorMethod method,const size_t number_arguments,
  const double *arguments,ExceptionInfo *exception)
{
#define SparseColorTag  "Distort/SparseColor"

  SparseColorMethod
    sparse_method;

  double
    *coeff;

  Image
    *sparse_image;

  size_t
    number_colors;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Determine number of color values needed per control point: one value for
    each channel that is both updatable and meaningful in this colorspace.
  */
  number_colors=0;
  if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
    number_colors++;
  if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
    number_colors++;
  if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
    number_colors++;
  if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
      (image->colorspace == CMYKColorspace))
    number_colors++;
  if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
      (image->alpha_trait == BlendPixelTrait))
    number_colors++;

  /*
    Convert input arguments into mapping coefficients; in this case we are
    mapping (distorting) colors, rather than coordinates.
  */
  {
    DistortImageMethod
      distort_method;

    distort_method=(DistortImageMethod) method;
    if ( distort_method >= SentinelDistortion )
      distort_method = ShepardsDistortion; /* Pretend to be Shepards */
    coeff = GenerateCoefficients(image, &distort_method, number_arguments,
                arguments, number_colors, exception);
    if ( coeff == (double *) NULL )
      return((Image *) NULL);
    /*
      Note some Distort Methods may fall back to other simpler methods.
      Currently the only fallback of concern is Bilinear to Affine
      (Barycentric), which is also a sparse_color method.  This also ensures
      correct two and one color Barycentric handling.
    */
    sparse_method = (SparseColorMethod) distort_method;
    if ( distort_method == ShepardsDistortion )
      sparse_method = method; /* return non-distort methods to normal */
    if ( sparse_method == InverseColorInterpolate )
      coeff[0]=0.5; /* sqrt() the squared distance for inverse */
  }

  /* Verbose output: print an FX-equivalent of the per-channel mapping */
  if ( IfStringTrue(GetImageArtifact(image,"verbose")) ) {
    switch (sparse_method) {
      case BarycentricColorInterpolate:
      {
        register ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n");
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr,
              " -channel R -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr,
              " -channel G -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr,
              " -channel B -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
            (image->colorspace == CMYKColorspace))
          (void) FormatLocaleFile(stderr,
              " -channel K -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
            (image->alpha_trait == BlendPixelTrait))
          (void) FormatLocaleFile(stderr,
              " -channel A -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        break;
      }
      case BilinearColorInterpolate:
      {
        register ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n");
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr,
              " -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4;
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr,
              " -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4;
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr,
              " -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4;
        if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
            (image->colorspace == CMYKColorspace))
          (void) FormatLocaleFile(stderr,
              " -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4;
        if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
            (image->alpha_trait == BlendPixelTrait))
          (void) FormatLocaleFile(stderr,
              " -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4;
        break;
      }
      default:
        /* sparse color method is too complex for FX emulation */
        break;
    }
  }

  /*
    Generate new image for generated interpolated gradient.
    ASIDE: Actually we could have just replaced the colors of the original
    image, but IM Core policy is: if the storage class could change then
    clone the image.
  */
  sparse_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sparse_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sparse_image,DirectClass,exception) == MagickFalse)
    {
      /* if image is ColorMapped - change it to DirectClass */
      sparse_image=DestroyImage(sparse_image);
      return((Image *) NULL);
    }
  { /* ----- MAIN CODE ----- */
    CacheView
      *sparse_view;

    MagickBooleanType
      status;

    MagickOffsetType
      progress;

    ssize_t
      j;

    status=MagickTrue;
    progress=0;
    sparse_view=AcquireAuthenticCacheView(sparse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static,4) shared(progress,status) \
      magick_threads(image,sparse_image,sparse_image->rows,1)
#endif
    for (j=0; j < (ssize_t) sparse_image->rows; j++)
    {
      MagickBooleanType
        sync;

      PixelInfo
        pixel;    /* pixel to assign to distorted image */

      register ssize_t
        i;

      register Quantum
        *restrict q;

      q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      GetPixelInfo(sparse_image,&pixel);
      for (i=0; i < (ssize_t) image->columns; i++)
      {
        GetPixelInfoPixel(image,q,&pixel);
        switch (sparse_method)
        {
          case BarycentricColorInterpolate:
          {
            /* linear (affine) gradient: channel = a*i + b*j + c */
            register ssize_t x=0;
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red     = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green   = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue    = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black   = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait == BlendPixelTrait))
              pixel.alpha   = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            break;
          }
          case BilinearColorInterpolate:
          {
            /* bilinear gradient: channel = a*i + b*j + c*i*j + d */
            register ssize_t x=0;
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red     = coeff[x]*i     + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green   = coeff[x]*i     + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue    = coeff[x]*i     + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black   = coeff[x]*i     + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait == BlendPixelTrait))
              pixel.alpha   = coeff[x]*i     + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            break;
          }
          case InverseColorInterpolate:
          case ShepardsColorInterpolate:
          {
            /* Inverse (Squared) Distance weights average (IDW) */
            size_t
              k;

            double
              denominator;

            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red=0.0;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green=0.0;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue=0.0;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black=0.0;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait == BlendPixelTrait))
              pixel.alpha=0.0;
            denominator = 0.0;
            for(k=0; k<number_arguments; k+=2+number_colors) {
              register ssize_t x=(ssize_t) k+2;
              double weight =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              weight = pow(weight,coeff[0]); /* inverse of power factor */
              /* clamp weight: at/inside unit distance use full weight */
              weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
              if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
                pixel.red     += arguments[x++]*weight;
              if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
                pixel.green   += arguments[x++]*weight;
              if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
                pixel.blue    += arguments[x++]*weight;
              if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                  (image->colorspace == CMYKColorspace))
                pixel.black   += arguments[x++]*weight;
              if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                  (image->alpha_trait == BlendPixelTrait))
                pixel.alpha   += arguments[x++]*weight;
              denominator += weight;
            }
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red/=denominator;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green/=denominator;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue/=denominator;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black/=denominator;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait == BlendPixelTrait))
              pixel.alpha/=denominator;
            break;
          }
          case VoronoiColorInterpolate:
          default:
          {
            size_t
              k;

            double
              minimum = MagickMaximumValue;

            /*
              Just use the closest control point you can find!
            */
            for (k=0; k<number_arguments; k+=2+number_colors) {
              double distance =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              if ( distance < minimum ) {
                register ssize_t x=(ssize_t) k+2;
                if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
                  pixel.red=arguments[x++];
                if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
                  pixel.green=arguments[x++];
                if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
                  pixel.blue=arguments[x++];
                if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->colorspace == CMYKColorspace))
                  pixel.black=arguments[x++];
                if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->alpha_trait == BlendPixelTrait))
                  pixel.alpha=arguments[x++];
                minimum = distance;
              }
            }
            break;
          }
        }
        /* set the color directly back into the source image;
           arguments were normalized, so scale up to the quantum range */
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          pixel.red*=QuantumRange;
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          pixel.green*=QuantumRange;
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          pixel.blue*=QuantumRange;
        if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
            (image->colorspace == CMYKColorspace))
          pixel.black*=QuantumRange;
        if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
            (image->alpha_trait == BlendPixelTrait))
          pixel.alpha*=QuantumRange;
        SetPixelInfoPixel(sparse_image,&pixel,q);
        q+=GetPixelChannels(sparse_image);
      }
      sync=SyncCacheViewAuthenticPixels(sparse_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp critical (MagickCore_SparseColorImage)
#endif
          proceed=SetImageProgress(image,SparseColorTag,progress++,image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
    sparse_view=DestroyCacheView(sparse_view);
    if (status == MagickFalse)
      sparse_image=DestroyImage(sparse_image);
  }
  coeff = (double *) RelinquishMagickMemory(coeff);
  return(sparse_image);
}
GB_binop__le_fp32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__le_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_08__le_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_02__le_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_04__le_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__le_fp32)
// A*D function (colscale):         GB (_AxD__le_fp32)
// D*A function (rowscale):         GB (_DxB__le_fp32)
// C+=B function (dense accum):     GB (_Cdense_accumB__le_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__le_fp32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__le_fp32)
// C=scalar+B                       GB (_bind1st__le_fp32)
// C=scalar+B'                      GB (_bind1st_tran__le_fp32)
// C=A+scalar                       GB (_bind2nd__le_fp32)
// C=A'+scalar                      GB (_bind2nd_tran__le_fp32)

// C type:   bool
// A type:   float
// A pattern? 0
// B type:   float
// B pattern? 0

// BinaryOp: cij = (aij <= bij)

// NOTE: the GB_* macros below form the contract consumed by the #include'd
// template files; their names and expansions must not be changed by hand.

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    float aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    float bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x <= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (any of these GxB_NO_* flags may be set in GB_control.h to shrink the
// compiled library; each kernel below then returns GrB_NO_VALUE)
#define GB_DISABLE \
    (GxB_NO_LE || GxB_NO_FP32 || GxB_NO_LE_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

    // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

    void GB ((none))
    (
        GrB_Matrix C,
        const GrB_Matrix A,
        const GrB_Matrix B,
        const int nthreads
    )
    {
        #include "GB_dense_ewise3_accum_template.c"
    }

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__le_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__le_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template is disabled for this operator (no accum variant generated)
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__le_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template is disabled for this operator (no accum variant generated)
    #if 0
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__le_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__le_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__le_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // the alpha/beta scalars are used only by eWiseUnion, not eWiseAdd
    float alpha_scalar ;
    float beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((float *) alpha_scalar_in)) ;
        beta_scalar  = (*((float *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__le_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__le_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP

        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).

        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }

    #else

        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.

        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"

    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__le_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__le_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__le_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    float   x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__le_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float   y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    float aij = GBX (Ax, pA, false) ;   \
    Cx [pC] = (x <= aij) ;              \
}

GrB_Info GB (_bind1st_tran__le_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent use
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    float aij = GBX (Ax, pA, false) ;   \
    Cx [pC] = (aij <= y) ;              \
}

GrB_Info GB (_bind2nd_tran__le_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
cgeadd.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgeadd.c, normal z -> c, Fri Sep 28 17:38:05 2018
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_geadd
 *
 *  Performs an addition of two general rectangular matrices similarly to the
 *  pcgeadd() function from the PBLAS library:
 *
 *    \f[ B = \alpha * op( A ) + \beta * B, \f]
 *
 *  where op( X ) is one of:
 *    \f[ op( X ) = X,   \f]
 *    \f[ op( X ) = X^T, \f]
 *    \f[ op( X ) = X^H, \f]
 *
 *  alpha and beta are scalars and A, B are matrices with op( A ) an m-by-n or
 *  n-by-m matrix depending on the value of transa and B an m-by-n matrix.
 *
 *******************************************************************************
 *
 * @param[in] transa
 *          Specifies whether the matrix A is non-transposed, transposed, or
 *          conjugate transposed
 *          - PlasmaNoTrans:   op( A ) = A
 *          - PlasmaTrans:     op( A ) = A^T
 *          - PlasmaConjTrans: op( A ) = A^H
 *
 * @param[in] m
 *          Number of rows of the matrices op( A ) and B.
 *          m >= 0.
 *
 * @param[in] n
 *          Number of columns of the matrices op( A ) and B.
 *          n >= 0.
 *
 * @param[in] alpha
 *          Scalar factor of A.
 *
 * @param[in] pA
 *          Matrix of size lda-by-k, where k is n when transa == PlasmaNoTrans
 *          and m otherwise.
 *
 * @param[in] lda
 *          Leading dimension of the array A. lda >= max(1,l), where l is m
 *          when transa = PlasmaNoTrans and n otherwise.
 *
 * @param[in] beta
 *          Scalar factor of B.
 *
 * @param[in,out] pB
 *          Matrix of size ldb-by-n.
 *          On exit, B = alpha * op( A ) + beta * B
 *
 * @param[in] ldb
 *          Leading dimension of the array B.
 *          ldb >= max(1,m).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 *
 *******************************************************************************
 *
 * @sa plasma_omp_cgeadd
 * @sa plasma_cgeadd
 * @sa plasma_dgeadd
 * @sa plasma_sgeadd
 *
 ******************************************************************************/
int plasma_cgeadd(plasma_enum_t transa,
                  int m, int n,
                  plasma_complex32_t alpha, plasma_complex32_t *pA, int lda,
                  plasma_complex32_t beta,  plasma_complex32_t *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((transa != PlasmaNoTrans) &&
        (transa != PlasmaTrans) &&
        (transa != PlasmaConjTrans)) {
        plasma_error("illegal value of transa");
        return -1;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -2;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -3;
    }
    if (pA == NULL) {
        plasma_error("NULL A");
        return -5;
    }

    // Dimensions of A depend on whether op( A ) transposes it.
    int am, an;
    if (transa == PlasmaNoTrans) {
        am = m;
        an = n;
    }
    else {
        am = n;
        an = m;
    }
    int bm = m;
    int bn = n;

    if (lda < imax(1, am)) {
        plasma_error("illegal value of lda");
        return -6;
    }
    if (pB == NULL) {
        plasma_error("NULL B");
        return -8;
    }
    if (ldb < imax(1, bm)) {
        plasma_error("illegal value of ldb");
        return -9;
    }

    // quick return
    if (m == 0 || n == 0 || (alpha == 0.0 && beta == 1.0))
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_geadd(plasma, PlasmaComplexFloat, m, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        am, an, 0, 0, am, an, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        bm, bn, 0, 0, bm, bn, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize sequence.
    // FIX: the return value was previously ignored; entering the async
    // region with an uninitialized sequence would make sequence.status
    // meaningless.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Initialize request.
    // FIX: the return value was previously ignored (same rationale).
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_cge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_cge2desc(pB, ldb, B, &sequence, &request);

        // Call tile async function.
        plasma_omp_cgeadd(transa, alpha, A, beta, B, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_cdesc2ge(A, pA, lda, &sequence, &request);
        plasma_omp_cdesc2ge(B, pB, ldb, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_geadd
 *
 *  Performs an addition of two general rectangular matrices similarly to the
 *  pcgeadd() function from the PBLAS library. Non-blocking tile version of
 *  plasma_cgeadd(). May return before the computation is finished. Operates on
 *  matrices stored by tiles. All matrices are passed through descriptors. All
 *  dimensions are taken from the descriptors. Allows for pipelining of
 *  operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] transa
 *          Specifies whether the matrix A is non-transposed, transposed, or
 *          conjugate transposed
 *          - PlasmaNoTrans:   op( A ) = A
 *          - PlasmaTrans:     op( A ) = A^T
 *          - PlasmaConjTrans: op( A ) = A^H
 *
 * @param[in] alpha
 *          The scalar alpha.
 *
 * @param[in] A
 *          Descriptor of matrix A.
 *
 * @param[in] beta
 *          The scalar beta.
 *
 * @param[in,out] B
 *          Descriptor of matrix B.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes). Check the
 *          sequence->status for errors.
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_cgeadd
 * @sa plasma_omp_cgeadd
 * @sa plasma_omp_dgeadd
 * @sa plasma_omp_sgeadd
 *
 ******************************************************************************/
void plasma_omp_cgeadd(plasma_enum_t transa,
                       plasma_complex32_t alpha, plasma_desc_t A,
                       plasma_complex32_t beta,  plasma_desc_t B,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // FIX: validate sequence and request first — the original checked them
    // last, after passing them (possibly NULL) to plasma_request_fail() in
    // the earlier argument checks. With NULL handles there is no way to
    // record the failure, so only report and return.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }

    // Check input arguments.
    if ((transa != PlasmaNoTrans) &&
        (transa != PlasmaTrans) &&
        (transa != PlasmaConjTrans)) {
        plasma_error("illegal value of transa");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    int am = transa == PlasmaNoTrans ? A.m : A.n;
    if ((alpha == 0.0 || am == 0) && beta == 1.0)
        return;

    // Call the parallel function.
    plasma_pcgeadd(transa, alpha, A, beta, B, sequence, request);
}
GB_binop__gt_uint8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__gt_uint8
// A.*B function (eWiseMult):       GB_AemultB__gt_uint8
// A*D function (colscale):         GB_AxD__gt_uint8
// D*A function (rowscale):         GB_DxB__gt_uint8
// C+=B function (dense accum):     GB_Cdense_accumB__gt_uint8
// C+=b function (dense accum):     GB_Cdense_accumb__gt_uint8
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__gt_uint8
// C=scalar+B                       GB_bind1st__gt_uint8
// C=scalar+B'                      GB_bind1st_tran__gt_uint8
// C=A+scalar                       GB_bind2nd__gt_uint8
// C=A'+scalar                      GB_bind2nd_tran__gt_uint8

// C type:   bool
// A type:   uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij > bij)

// NOTE: the GB_* macros below form the contract consumed by the #include'd
// template files; their names and expansions must not be changed by hand.

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint8_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = (x > y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (any of these GxB_NO_* flags may be set in GB_control.h to shrink the
// compiled library; each kernel below then returns GrB_NO_VALUE)
#define GB_DISABLE \
    (GxB_NO_GT || GxB_NO_UINT8 || GxB_NO_GT_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

    // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

    void (none)
    (
        GrB_Matrix C,
        const GrB_Matrix A,
        const GrB_Matrix B,
        const int nthreads
    )
    {
        #include "GB_dense_ewise3_accum_template.c"
    }

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__gt_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__gt_uint8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template is disabled for this operator (no accum variant generated)
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__gt_uint8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template is disabled for this operator (no accum variant generated)
    #if 0
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__gt_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__gt_uint8
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__gt_uint8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__gt_uint8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__gt_uint8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint8_t bij = Bx [p] ;
        Cx [p] = (x > bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__gt_uint8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t   y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint8_t aij = Ax [p] ;
        Cx [p] = (aij > y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint8_t aij = Ax [pA] ;         \
    Cx [pC] = (x > aij) ;           \
}

GrB_Info GB_bind1st_tran__gt_uint8
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent use
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint8_t aij = Ax [pA] ;         \
    Cx [pC] = (aij > y) ;           \
}

GrB_Info GB_bind2nd_tran__gt_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif